1 // SPDX-License-Identifier: GPL-2.0-only
2 /* xfrm_user.c: User interface to configure xfrm engine.
4 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
8 * Kazunori MIYAZAWA @USAGI
9 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
14 #include <linux/crypto.h>
15 #include <linux/module.h>
16 #include <linux/kernel.h>
17 #include <linux/types.h>
18 #include <linux/slab.h>
19 #include <linux/socket.h>
20 #include <linux/string.h>
21 #include <linux/net.h>
22 #include <linux/skbuff.h>
23 #include <linux/pfkeyv2.h>
24 #include <linux/ipsec.h>
25 #include <linux/init.h>
26 #include <linux/security.h>
29 #include <net/netlink.h>
31 #include <linux/uaccess.h>
32 #if IS_ENABLED(CONFIG_IPV6)
33 #include <linux/in6.h>
35 #include <asm/unaligned.h>
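/* The verify_*() helpers below vet user-supplied algorithm attributes
 * before any state is built: each checks that the attribute payload is at
 * least as long as the length implied by its own header, and forcibly
 * NUL-terminates alg_name so later string handling is safe. */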
37 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
39 struct nlattr *rt = attrs[type];
40 struct xfrm_algo *algp;
46 if (nla_len(rt) < (int)xfrm_alg_len(algp))
59 algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
63 static int verify_auth_trunc(struct nlattr **attrs)
65 struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
66 struct xfrm_algo_auth *algp;
72 if (nla_len(rt) < (int)xfrm_alg_auth_len(algp))
75 algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
79 static int verify_aead(struct nlattr **attrs)
81 struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
82 struct xfrm_algo_aead *algp;
88 if (nla_len(rt) < (int)aead_len(algp))
91 algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
95 static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
96 xfrm_address_t **addrp)
98 struct nlattr *rt = attrs[type];
101 *addrp = nla_data(rt);
104 static inline int verify_sec_ctx_len(struct nlattr **attrs)
106 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
107 struct xfrm_user_sec_ctx *uctx;
113 if (uctx->len > nla_len(rt) ||
114 uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
120 static inline int verify_replay(struct xfrm_usersa_info *p,
121 struct nlattr **attrs)
123 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
124 struct xfrm_replay_state_esn *rs;
127 return (p->flags & XFRM_STATE_ESN) ? -EINVAL : 0;
131 if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
134 if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
135 nla_len(rt) != sizeof(*rs))
138 /* Only ESP and AH support the ESN feature. */
139 if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))
142 if (p->replay_window != 0)
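/* To recap verify_replay() above: an XFRMA_REPLAY_ESN_VAL attribute must
 * carry either the full ESN structure (header plus bmp_len bitmap words)
 * or exactly the bare header, its bmp_len must fit within
 * XFRMA_REPLAY_ESN_MAX, and ESN may only be combined with ESP or AH and a
 * zero legacy replay_window. */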
148 static int verify_newsa_info(struct xfrm_usersa_info *p,
149 struct nlattr **attrs)
159 #if IS_ENABLED(CONFIG_IPV6)
170 switch (p->sel.family) {
175 if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
181 #if IS_ENABLED(CONFIG_IPV6)
182 if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
196 switch (p->id.proto) {
198 if ((!attrs[XFRMA_ALG_AUTH] &&
199 !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
200 attrs[XFRMA_ALG_AEAD] ||
201 attrs[XFRMA_ALG_CRYPT] ||
202 attrs[XFRMA_ALG_COMP] ||
208 if (attrs[XFRMA_ALG_COMP])
210 if (!attrs[XFRMA_ALG_AUTH] &&
211 !attrs[XFRMA_ALG_AUTH_TRUNC] &&
212 !attrs[XFRMA_ALG_CRYPT] &&
213 !attrs[XFRMA_ALG_AEAD])
215 if ((attrs[XFRMA_ALG_AUTH] ||
216 attrs[XFRMA_ALG_AUTH_TRUNC] ||
217 attrs[XFRMA_ALG_CRYPT]) &&
218 attrs[XFRMA_ALG_AEAD])
220 if (attrs[XFRMA_TFCPAD] &&
221 p->mode != XFRM_MODE_TUNNEL)
226 if (!attrs[XFRMA_ALG_COMP] ||
227 attrs[XFRMA_ALG_AEAD] ||
228 attrs[XFRMA_ALG_AUTH] ||
229 attrs[XFRMA_ALG_AUTH_TRUNC] ||
230 attrs[XFRMA_ALG_CRYPT] ||
231 attrs[XFRMA_TFCPAD] ||
232 (ntohl(p->id.spi) >= 0x10000))
236 #if IS_ENABLED(CONFIG_IPV6)
237 case IPPROTO_DSTOPTS:
238 case IPPROTO_ROUTING:
239 if (attrs[XFRMA_ALG_COMP] ||
240 attrs[XFRMA_ALG_AUTH] ||
241 attrs[XFRMA_ALG_AUTH_TRUNC] ||
242 attrs[XFRMA_ALG_AEAD] ||
243 attrs[XFRMA_ALG_CRYPT] ||
244 attrs[XFRMA_ENCAP] ||
245 attrs[XFRMA_SEC_CTX] ||
246 attrs[XFRMA_TFCPAD] ||
247 !attrs[XFRMA_COADDR])
256 if ((err = verify_aead(attrs)))
258 if ((err = verify_auth_trunc(attrs)))
260 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
262 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
264 if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
266 if ((err = verify_sec_ctx_len(attrs)))
268 if ((err = verify_replay(p, attrs)))
273 case XFRM_MODE_TRANSPORT:
274 case XFRM_MODE_TUNNEL:
275 case XFRM_MODE_ROUTEOPTIMIZATION:
285 if (attrs[XFRMA_MTIMER_THRESH])
286 if (!attrs[XFRMA_ENCAP])
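/* verify_newsa_info() is essentially a compatibility matrix: AH demands an
 * auth algorithm and rejects AEAD/crypt/comp; ESP accepts auth, crypt or
 * AEAD (AEAD being exclusive of the others) and allows XFRMA_TFCPAD only
 * in tunnel mode; IPcomp requires a comp algorithm and a CPI-sized SPI
 * below 0x10000; and the IPv6 mobility headers (dstopts/routing) require
 * XFRMA_COADDR and no algorithm attributes at all. */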
293 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
294 struct xfrm_algo_desc *(*get_byname)(const char *, int),
297 struct xfrm_algo *p, *ualg;
298 struct xfrm_algo_desc *algo;
303 ualg = nla_data(rta);
305 algo = get_byname(ualg->alg_name, 1);
308 *props = algo->desc.sadb_alg_id;
310 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
314 strcpy(p->alg_name, algo->name);
319 static int attach_crypt(struct xfrm_state *x, struct nlattr *rta)
321 struct xfrm_algo *p, *ualg;
322 struct xfrm_algo_desc *algo;
327 ualg = nla_data(rta);
329 algo = xfrm_ealg_get_byname(ualg->alg_name, 1);
332 x->props.ealgo = algo->desc.sadb_alg_id;
334 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
338 strcpy(p->alg_name, algo->name);
340 x->geniv = algo->uinfo.encr.geniv;
344 static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
347 struct xfrm_algo *ualg;
348 struct xfrm_algo_auth *p;
349 struct xfrm_algo_desc *algo;
354 ualg = nla_data(rta);
356 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
359 *props = algo->desc.sadb_alg_id;
361 p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
365 strcpy(p->alg_name, algo->name);
366 p->alg_key_len = ualg->alg_key_len;
367 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
368 memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);
374 static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
377 struct xfrm_algo_auth *p, *ualg;
378 struct xfrm_algo_desc *algo;
383 ualg = nla_data(rta);
385 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
388 if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
390 *props = algo->desc.sadb_alg_id;
392 p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
396 strcpy(p->alg_name, algo->name);
397 if (!p->alg_trunc_len)
398 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
404 static int attach_aead(struct xfrm_state *x, struct nlattr *rta)
406 struct xfrm_algo_aead *p, *ualg;
407 struct xfrm_algo_desc *algo;
412 ualg = nla_data(rta);
414 algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
417 x->props.ealgo = algo->desc.sadb_alg_id;
419 p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
423 strcpy(p->alg_name, algo->name);
425 x->geniv = algo->uinfo.aead.geniv;
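/* The attach_*() helpers share one pattern: resolve the algorithm
 * descriptor from the user-supplied name (probing for a module if
 * needed), record the SADB algorithm id in the state's properties,
 * kmemdup() the user payload, and overwrite alg_name with the
 * descriptor's canonical name so the rest of the stack never sees an
 * unvalidated string. */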
429 static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
432 struct xfrm_replay_state_esn *up;
435 if (!replay_esn || !rp)
439 ulen = xfrm_replay_state_esn_len(up);
441 /* Check the overall length and the internal bitmap length to avoid
442 * potential overflow. */
443 if (nla_len(rp) < (int)ulen ||
444 xfrm_replay_state_esn_len(replay_esn) != ulen ||
445 replay_esn->bmp_len != up->bmp_len)
448 if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
454 static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
455 struct xfrm_replay_state_esn **preplay_esn,
458 struct xfrm_replay_state_esn *p, *pp, *up;
459 unsigned int klen, ulen;
465 klen = xfrm_replay_state_esn_len(up);
466 ulen = nla_len(rta) >= (int)klen ? klen : sizeof(*up);
468 p = kzalloc(klen, GFP_KERNEL);
472 pp = kzalloc(klen, GFP_KERNEL);
479 memcpy(pp, up, ulen);
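/* Note the two lengths in xfrm_alloc_replay_state_esn(): klen is the full
 * in-kernel size for the requested bmp_len, ulen is what userspace
 * actually sent. A legacy caller may pass just the bare header, in which
 * case the bitmap stays zero-filled from kzalloc(). */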
487 static inline unsigned int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
489 unsigned int len = 0;
492 len += sizeof(struct xfrm_user_sec_ctx);
493 len += xfrm_ctx->ctx_len;
498 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
500 memcpy(&x->id, &p->id, sizeof(x->id));
501 memcpy(&x->sel, &p->sel, sizeof(x->sel));
502 memcpy(&x->lft, &p->lft, sizeof(x->lft));
503 x->props.mode = p->mode;
504 x->props.replay_window = min_t(unsigned int, p->replay_window,
505 sizeof(x->replay.bitmap) * 8);
506 x->props.reqid = p->reqid;
507 x->props.family = p->family;
508 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
509 x->props.flags = p->flags;
511 if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
512 x->sel.family = p->family;
516 * someday when pfkey also has support, we could have the code
517 * somehow made shareable and move it to xfrm_state.c - JHS
520 static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
523 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
524 struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
525 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
526 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
527 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
528 struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH];
531 struct xfrm_replay_state_esn *replay_esn;
532 replay_esn = nla_data(re);
533 memcpy(x->replay_esn, replay_esn,
534 xfrm_replay_state_esn_len(replay_esn));
535 memcpy(x->preplay_esn, replay_esn,
536 xfrm_replay_state_esn_len(replay_esn));
540 struct xfrm_replay_state *replay;
541 replay = nla_data(rp);
542 memcpy(&x->replay, replay, sizeof(*replay));
543 memcpy(&x->preplay, replay, sizeof(*replay));
547 struct xfrm_lifetime_cur *ltime;
548 ltime = nla_data(lt);
549 x->curlft.bytes = ltime->bytes;
550 x->curlft.packets = ltime->packets;
551 x->curlft.add_time = ltime->add_time;
552 x->curlft.use_time = ltime->use_time;
556 x->replay_maxage = nla_get_u32(et);
559 x->replay_maxdiff = nla_get_u32(rt);
562 x->mapping_maxage = nla_get_u32(mt);
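/* xfrm_update_ae_params() applies whichever aevent attributes are
 * present: XFRMA_REPLAY_VAL / XFRMA_REPLAY_ESN_VAL replace the replay
 * state, XFRMA_LTIME_VAL the current lifetime counters, and the *_THRESH
 * attributes the ageing, replay and mapping thresholds. */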
565 static void xfrm_smark_init(struct nlattr **attrs, struct xfrm_mark *m)
567 if (attrs[XFRMA_SET_MARK]) {
568 m->v = nla_get_u32(attrs[XFRMA_SET_MARK]);
569 if (attrs[XFRMA_SET_MARK_MASK])
570 m->m = nla_get_u32(attrs[XFRMA_SET_MARK_MASK]);
578 static struct xfrm_state *xfrm_state_construct(struct net *net,
579 struct xfrm_usersa_info *p,
580 struct nlattr **attrs,
583 struct xfrm_state *x = xfrm_state_alloc(net);
589 copy_from_user_state(x, p);
591 if (attrs[XFRMA_ENCAP]) {
592 x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
593 sizeof(*x->encap), GFP_KERNEL);
594 if (x->encap == NULL)
598 if (attrs[XFRMA_COADDR]) {
599 x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
600 sizeof(*x->coaddr), GFP_KERNEL);
601 if (x->coaddr == NULL)
605 if (attrs[XFRMA_SA_EXTRA_FLAGS])
606 x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);
608 if ((err = attach_aead(x, attrs[XFRMA_ALG_AEAD])))
610 if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
611 attrs[XFRMA_ALG_AUTH_TRUNC])))
613 if (!x->props.aalgo) {
614 if ((err = attach_auth(&x->aalg, &x->props.aalgo,
615 attrs[XFRMA_ALG_AUTH])))
618 if ((err = attach_crypt(x, attrs[XFRMA_ALG_CRYPT])))
620 if ((err = attach_one_algo(&x->calg, &x->props.calgo,
621 xfrm_calg_get_byname,
622 attrs[XFRMA_ALG_COMP])))
625 if (attrs[XFRMA_TFCPAD])
626 x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);
628 xfrm_mark_get(attrs, &x->mark);
630 xfrm_smark_init(attrs, &x->props.smark);
632 if (attrs[XFRMA_IF_ID])
633 x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
635 err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]);
639 if (attrs[XFRMA_SEC_CTX]) {
640 err = security_xfrm_state_alloc(x,
641 nla_data(attrs[XFRMA_SEC_CTX]));
646 if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
647 attrs[XFRMA_REPLAY_ESN_VAL])))
651 x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
652 /* sysctl_xfrm_aevent_etime is in 100ms units */
653 x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;
655 if ((err = xfrm_init_replay(x)))
658 /* override default values from above */
659 xfrm_update_ae_params(x, attrs, 0);
661 /* configure the hardware if offload is requested */
662 if (attrs[XFRMA_OFFLOAD_DEV]) {
663 err = xfrm_dev_state_add(net, x,
664 nla_data(attrs[XFRMA_OFFLOAD_DEV]));
672 x->km.state = XFRM_STATE_DEAD;
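/* xfrm_state_construct() is the one place a validated xfrm_usersa_info
 * plus attributes becomes a live xfrm_state. On any failure it marks the
 * half-built state XFRM_STATE_DEAD and drops the reference, letting the
 * normal destructor free whatever was already attached. */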
679 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
680 struct nlattr **attrs)
682 struct net *net = sock_net(skb->sk);
683 struct xfrm_usersa_info *p = nlmsg_data(nlh);
684 struct xfrm_state *x;
688 err = verify_newsa_info(p, attrs);
692 x = xfrm_state_construct(net, p, attrs, &err);
697 if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
698 err = xfrm_state_add(x);
700 err = xfrm_state_update(x);
702 xfrm_audit_state_add(x, err ? 0 : 1, true);
705 x->km.state = XFRM_STATE_DEAD;
706 xfrm_dev_state_delete(x);
711 if (x->km.state == XFRM_STATE_VOID)
712 x->km.state = XFRM_STATE_VALID;
714 c.seq = nlh->nlmsg_seq;
715 c.portid = nlh->nlmsg_pid;
716 c.event = nlh->nlmsg_type;
718 km_state_notify(x, &c);
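/* xfrm_add_sa() backs both XFRM_MSG_NEWSA and XFRM_MSG_UPDSA; only the
 * insert-vs-update call differs. For orientation, a request from
 * userspace is laid out roughly as follows (illustrative, not
 * exhaustive):
 *
 *   struct nlmsghdr          nlmsg_type = XFRM_MSG_NEWSA
 *   struct xfrm_usersa_info  id, selector, lifetimes, family, mode, ...
 *   XFRMA_ALG_CRYPT          struct xfrm_algo + key bytes
 *   XFRMA_MARK               struct xfrm_mark (optional)
 */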
724 static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
725 struct xfrm_usersa_id *p,
726 struct nlattr **attrs,
729 struct xfrm_state *x = NULL;
732 u32 mark = xfrm_mark_get(attrs, &m);
734 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
736 x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
738 xfrm_address_t *saddr = NULL;
740 verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
747 x = xfrm_state_lookup_byaddr(net, mark,
749 p->proto, p->family);
758 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
759 struct nlattr **attrs)
761 struct net *net = sock_net(skb->sk);
762 struct xfrm_state *x;
765 struct xfrm_usersa_id *p = nlmsg_data(nlh);
767 x = xfrm_user_state_lookup(net, p, attrs, &err);
771 if ((err = security_xfrm_state_delete(x)) != 0)
774 if (xfrm_state_kern(x)) {
779 err = xfrm_state_delete(x);
784 c.seq = nlh->nlmsg_seq;
785 c.portid = nlh->nlmsg_pid;
786 c.event = nlh->nlmsg_type;
787 km_state_notify(x, &c);
790 xfrm_audit_state_delete(x, err ? 0 : 1, true);
795 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
797 memset(p, 0, sizeof(*p));
798 memcpy(&p->id, &x->id, sizeof(p->id));
799 memcpy(&p->sel, &x->sel, sizeof(p->sel));
800 memcpy(&p->lft, &x->lft, sizeof(p->lft));
801 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
802 put_unaligned(x->stats.replay_window, &p->stats.replay_window);
803 put_unaligned(x->stats.replay, &p->stats.replay);
804 put_unaligned(x->stats.integrity_failed, &p->stats.integrity_failed);
805 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
806 p->mode = x->props.mode;
807 p->replay_window = x->props.replay_window;
808 p->reqid = x->props.reqid;
809 p->family = x->props.family;
810 p->flags = x->props.flags;
814 struct xfrm_dump_info {
815 struct sk_buff *in_skb;
816 struct sk_buff *out_skb;
821 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
823 struct xfrm_user_sec_ctx *uctx;
825 int ctx_size = sizeof(*uctx) + s->ctx_len;
827 attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
831 uctx = nla_data(attr);
832 uctx->exttype = XFRMA_SEC_CTX;
833 uctx->len = ctx_size;
834 uctx->ctx_doi = s->ctx_doi;
835 uctx->ctx_alg = s->ctx_alg;
836 uctx->ctx_len = s->ctx_len;
837 memcpy(uctx + 1, s->ctx_str, s->ctx_len);
842 static int copy_user_offload(struct xfrm_state_offload *xso, struct sk_buff *skb)
844 struct xfrm_user_offload *xuo;
847 attr = nla_reserve(skb, XFRMA_OFFLOAD_DEV, sizeof(*xuo));
851 xuo = nla_data(attr);
852 memset(xuo, 0, sizeof(*xuo));
853 xuo->ifindex = xso->dev->ifindex;
854 xuo->flags = xso->flags;
859 static bool xfrm_redact(void)
861 return IS_ENABLED(CONFIG_SECURITY) &&
862 security_locked_down(LOCKDOWN_XFRM_SECRET);
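/* With the kernel locked down (LOCKDOWN_XFRM_SECRET), the copy_to_user_*
 * dumpers below still report algorithm names and key lengths but zero out
 * the key material itself, so state dumps expose configuration without
 * secrets. */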
865 static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
867 struct xfrm_algo *algo;
868 struct xfrm_algo_auth *ap;
870 bool redact_secret = xfrm_redact();
872 nla = nla_reserve(skb, XFRMA_ALG_AUTH,
873 sizeof(*algo) + (auth->alg_key_len + 7) / 8);
876 algo = nla_data(nla);
877 strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
879 if (redact_secret && auth->alg_key_len)
880 memset(algo->alg_key, 0, (auth->alg_key_len + 7) / 8);
882 memcpy(algo->alg_key, auth->alg_key,
883 (auth->alg_key_len + 7) / 8);
884 algo->alg_key_len = auth->alg_key_len;
886 nla = nla_reserve(skb, XFRMA_ALG_AUTH_TRUNC, xfrm_alg_auth_len(auth));
890 memcpy(ap, auth, sizeof(struct xfrm_algo_auth));
891 if (redact_secret && auth->alg_key_len)
892 memset(ap->alg_key, 0, (auth->alg_key_len + 7) / 8);
894 memcpy(ap->alg_key, auth->alg_key,
895 (auth->alg_key_len + 7) / 8);
899 static int copy_to_user_aead(struct xfrm_algo_aead *aead, struct sk_buff *skb)
901 struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_AEAD, aead_len(aead));
902 struct xfrm_algo_aead *ap;
903 bool redact_secret = xfrm_redact();
909 memcpy(ap, aead, sizeof(*aead));
911 if (redact_secret && aead->alg_key_len)
912 memset(ap->alg_key, 0, (aead->alg_key_len + 7) / 8);
914 memcpy(ap->alg_key, aead->alg_key,
915 (aead->alg_key_len + 7) / 8);
919 static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb)
921 struct xfrm_algo *ap;
922 bool redact_secret = xfrm_redact();
923 struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_CRYPT,
929 memcpy(ap, ealg, sizeof(*ealg));
931 if (redact_secret && ealg->alg_key_len)
932 memset(ap->alg_key, 0, (ealg->alg_key_len + 7) / 8);
934 memcpy(ap->alg_key, ealg->alg_key,
935 (ealg->alg_key_len + 7) / 8);
940 static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m)
945 ret = nla_put_u32(skb, XFRMA_SET_MARK, m->v);
947 ret = nla_put_u32(skb, XFRMA_SET_MARK_MASK, m->m);
952 /* Don't change this without updating xfrm_sa_len! */
953 static int copy_to_user_state_extra(struct xfrm_state *x,
954 struct xfrm_usersa_info *p,
959 copy_to_user_state(x, p);
961 if (x->props.extra_flags) {
962 ret = nla_put_u32(skb, XFRMA_SA_EXTRA_FLAGS,
963 x->props.extra_flags);
969 ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
974 ret = nla_put_u64_64bit(skb, XFRMA_LASTUSED, x->lastused,
980 ret = copy_to_user_aead(x->aead, skb);
985 ret = copy_to_user_auth(x->aalg, skb);
990 ret = copy_to_user_ealg(x->ealg, skb);
995 ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
1000 ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
1005 ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
1009 ret = xfrm_mark_put(skb, &x->mark);
1013 ret = xfrm_smark_put(skb, &x->props.smark);
1018 ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
1019 xfrm_replay_state_esn_len(x->replay_esn),
1022 ret = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
1027 ret = copy_user_offload(&x->xso, skb);
1031 ret = nla_put_u32(skb, XFRMA_IF_ID, x->if_id);
1036 ret = copy_sec_ctx(x->security, skb);
1040 if (x->mapping_maxage)
1041 ret = nla_put_u32(skb, XFRMA_MTIMER_THRESH, x->mapping_maxage);
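/* Every optional attribute emitted by copy_to_user_state_extra() must be
 * accounted for in xfrm_sa_len() (not shown in this excerpt), or notify
 * skbs can be undersized; hence the warning above the function. */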
1046 static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
1048 struct xfrm_dump_info *sp = ptr;
1049 struct sk_buff *in_skb = sp->in_skb;
1050 struct sk_buff *skb = sp->out_skb;
1051 struct xfrm_translator *xtr;
1052 struct xfrm_usersa_info *p;
1053 struct nlmsghdr *nlh;
1056 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
1057 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
1061 p = nlmsg_data(nlh);
1063 err = copy_to_user_state_extra(x, p, skb);
1065 nlmsg_cancel(skb, nlh);
1068 nlmsg_end(skb, nlh);
1070 xtr = xfrm_get_translator();
1072 err = xtr->alloc_compat(skb, nlh);
1074 xfrm_put_translator(xtr);
1076 nlmsg_cancel(skb, nlh);
1084 static int xfrm_dump_sa_done(struct netlink_callback *cb)
1086 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
1087 struct sock *sk = cb->skb->sk;
1088 struct net *net = sock_net(sk);
1091 xfrm_state_walk_done(walk, net);
1095 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
1097 struct net *net = sock_net(skb->sk);
1098 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
1099 struct xfrm_dump_info info;
1101 BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
1102 sizeof(cb->args) - sizeof(cb->args[0]));
1104 info.in_skb = cb->skb;
1106 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1107 info.nlmsg_flags = NLM_F_MULTI;
1110 struct nlattr *attrs[XFRMA_MAX+1];
1111 struct xfrm_address_filter *filter = NULL;
1115 err = nlmsg_parse_deprecated(cb->nlh, 0, attrs, XFRMA_MAX,
1116 xfrma_policy, cb->extack);
1120 if (attrs[XFRMA_ADDRESS_FILTER]) {
1121 filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]),
1122 sizeof(*filter), GFP_KERNEL);
1127 if (attrs[XFRMA_PROTO])
1128 proto = nla_get_u8(attrs[XFRMA_PROTO]);
1130 xfrm_state_walk_init(walk, proto, filter);
1134 (void) xfrm_state_walk(net, walk, dump_one_state, &info);
1139 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
1140 struct xfrm_state *x, u32 seq)
1142 struct xfrm_dump_info info;
1143 struct sk_buff *skb;
1146 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1148 return ERR_PTR(-ENOMEM);
1150 info.in_skb = in_skb;
1152 info.nlmsg_seq = seq;
1153 info.nlmsg_flags = 0;
1155 err = dump_one_state(x, 0, &info);
1158 return ERR_PTR(err);
1164 /* A wrapper for nlmsg_multicast() checking that nlsk is still available.
1165 * Must be called with the RCU read lock held.
1167 static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
1168 u32 pid, unsigned int group)
1170 struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
1171 struct xfrm_translator *xtr;
1178 xtr = xfrm_get_translator();
1180 int err = xtr->alloc_compat(skb, nlmsg_hdr(skb));
1182 xfrm_put_translator(xtr);
1189 return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
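/* xfrm_spdinfo_msgsize() below reserves room for two xfrmu_spdhthresh
 * attributes (XFRMA_SPD_IPV4_HTHRESH and XFRMA_SPD_IPV6_HTHRESH), which
 * is why the same nla_total_size() term appears twice. */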
1192 static inline unsigned int xfrm_spdinfo_msgsize(void)
1194 return NLMSG_ALIGN(4)
1195 + nla_total_size(sizeof(struct xfrmu_spdinfo))
1196 + nla_total_size(sizeof(struct xfrmu_spdhinfo))
1197 + nla_total_size(sizeof(struct xfrmu_spdhthresh))
1198 + nla_total_size(sizeof(struct xfrmu_spdhthresh));
1201 static int build_spdinfo(struct sk_buff *skb, struct net *net,
1202 u32 portid, u32 seq, u32 flags)
1204 struct xfrmk_spdinfo si;
1205 struct xfrmu_spdinfo spc;
1206 struct xfrmu_spdhinfo sph;
1207 struct xfrmu_spdhthresh spt4, spt6;
1208 struct nlmsghdr *nlh;
1213 nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
1214 if (nlh == NULL) /* shouldn't really happen ... */
1217 f = nlmsg_data(nlh);
1219 xfrm_spd_getinfo(net, &si);
1220 spc.incnt = si.incnt;
1221 spc.outcnt = si.outcnt;
1222 spc.fwdcnt = si.fwdcnt;
1223 spc.inscnt = si.inscnt;
1224 spc.outscnt = si.outscnt;
1225 spc.fwdscnt = si.fwdscnt;
1226 sph.spdhcnt = si.spdhcnt;
1227 sph.spdhmcnt = si.spdhmcnt;
1230 lseq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
1232 spt4.lbits = net->xfrm.policy_hthresh.lbits4;
1233 spt4.rbits = net->xfrm.policy_hthresh.rbits4;
1234 spt6.lbits = net->xfrm.policy_hthresh.lbits6;
1235 spt6.rbits = net->xfrm.policy_hthresh.rbits6;
1236 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, lseq));
1238 err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
1240 err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
1242 err = nla_put(skb, XFRMA_SPD_IPV4_HTHRESH, sizeof(spt4), &spt4);
1244 err = nla_put(skb, XFRMA_SPD_IPV6_HTHRESH, sizeof(spt6), &spt6);
1246 nlmsg_cancel(skb, nlh);
1250 nlmsg_end(skb, nlh);
1254 static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
1255 struct nlattr **attrs)
1257 struct net *net = sock_net(skb->sk);
1258 struct xfrmu_spdhthresh *thresh4 = NULL;
1259 struct xfrmu_spdhthresh *thresh6 = NULL;
1261 /* selector prefix-length thresholds used to hash policies */
1262 if (attrs[XFRMA_SPD_IPV4_HTHRESH]) {
1263 struct nlattr *rta = attrs[XFRMA_SPD_IPV4_HTHRESH];
1265 if (nla_len(rta) < sizeof(*thresh4))
1267 thresh4 = nla_data(rta);
1268 if (thresh4->lbits > 32 || thresh4->rbits > 32)
1271 if (attrs[XFRMA_SPD_IPV6_HTHRESH]) {
1272 struct nlattr *rta = attrs[XFRMA_SPD_IPV6_HTHRESH];
1274 if (nla_len(rta) < sizeof(*thresh6))
1276 thresh6 = nla_data(rta);
1277 if (thresh6->lbits > 128 || thresh6->rbits > 128)
1281 if (thresh4 || thresh6) {
1282 write_seqlock(&net->xfrm.policy_hthresh.lock);
1284 net->xfrm.policy_hthresh.lbits4 = thresh4->lbits;
1285 net->xfrm.policy_hthresh.rbits4 = thresh4->rbits;
1288 net->xfrm.policy_hthresh.lbits6 = thresh6->lbits;
1289 net->xfrm.policy_hthresh.rbits6 = thresh6->rbits;
1291 write_sequnlock(&net->xfrm.policy_hthresh.lock);
1293 xfrm_policy_hash_rebuild(net);
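/* The threshold update above takes the policy_hthresh seqlock on the
 * writer side; build_spdinfo() reads the same values under a
 * read_seqbegin()/read_seqretry() loop, so a concurrent SET is never
 * observed half-applied. */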
1299 static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
1300 struct nlattr **attrs)
1302 struct net *net = sock_net(skb->sk);
1303 struct sk_buff *r_skb;
1304 u32 *flags = nlmsg_data(nlh);
1305 u32 sportid = NETLINK_CB(skb).portid;
1306 u32 seq = nlh->nlmsg_seq;
1309 r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
1313 err = build_spdinfo(r_skb, net, sportid, seq, *flags);
1316 return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
1319 static inline unsigned int xfrm_sadinfo_msgsize(void)
1321 return NLMSG_ALIGN(4)
1322 + nla_total_size(sizeof(struct xfrmu_sadhinfo))
1323 + nla_total_size(4); /* XFRMA_SAD_CNT */
1326 static int build_sadinfo(struct sk_buff *skb, struct net *net,
1327 u32 portid, u32 seq, u32 flags)
1329 struct xfrmk_sadinfo si;
1330 struct xfrmu_sadhinfo sh;
1331 struct nlmsghdr *nlh;
1335 nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
1336 if (nlh == NULL) /* shouldn't really happen ... */
1339 f = nlmsg_data(nlh);
1341 xfrm_sad_getinfo(net, &si);
1343 sh.sadhmcnt = si.sadhmcnt;
1344 sh.sadhcnt = si.sadhcnt;
1346 err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
1348 err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
1350 nlmsg_cancel(skb, nlh);
1354 nlmsg_end(skb, nlh);
1358 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
1359 struct nlattr **attrs)
1361 struct net *net = sock_net(skb->sk);
1362 struct sk_buff *r_skb;
1363 u32 *flags = nlmsg_data(nlh);
1364 u32 sportid = NETLINK_CB(skb).portid;
1365 u32 seq = nlh->nlmsg_seq;
1368 r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
1372 err = build_sadinfo(r_skb, net, sportid, seq, *flags);
1375 return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
1378 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1379 struct nlattr **attrs)
1381 struct net *net = sock_net(skb->sk);
1382 struct xfrm_usersa_id *p = nlmsg_data(nlh);
1383 struct xfrm_state *x;
1384 struct sk_buff *resp_skb;
1387 x = xfrm_user_state_lookup(net, p, attrs, &err);
1391 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1392 if (IS_ERR(resp_skb)) {
1393 err = PTR_ERR(resp_skb);
1395 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
1402 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
1403 struct nlattr **attrs)
1405 struct net *net = sock_net(skb->sk);
1406 struct xfrm_state *x;
1407 struct xfrm_userspi_info *p;
1408 struct xfrm_translator *xtr;
1409 struct sk_buff *resp_skb;
1410 xfrm_address_t *daddr;
1417 p = nlmsg_data(nlh);
1418 err = verify_spi_info(p->info.id.proto, p->min, p->max);
1422 family = p->info.family;
1423 daddr = &p->info.id.daddr;
1427 mark = xfrm_mark_get(attrs, &m);
1429 if (attrs[XFRMA_IF_ID])
1430 if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
1433 x = xfrm_find_acq_byseq(net, mark, p->info.seq);
1434 if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
1441 x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
1442 if_id, p->info.id.proto, daddr,
1449 err = xfrm_alloc_spi(x, p->min, p->max);
1453 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1454 if (IS_ERR(resp_skb)) {
1455 err = PTR_ERR(resp_skb);
1459 xtr = xfrm_get_translator();
1461 err = xtr->alloc_compat(skb, nlmsg_hdr(skb));
1463 xfrm_put_translator(xtr);
1465 kfree_skb(resp_skb);
1470 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
1478 static int verify_policy_dir(u8 dir)
1481 case XFRM_POLICY_IN:
1482 case XFRM_POLICY_OUT:
1483 case XFRM_POLICY_FWD:
1493 static int verify_policy_type(u8 type)
1496 case XFRM_POLICY_TYPE_MAIN:
1497 #ifdef CONFIG_XFRM_SUB_POLICY
1498 case XFRM_POLICY_TYPE_SUB:
1509 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
1514 case XFRM_SHARE_ANY:
1515 case XFRM_SHARE_SESSION:
1516 case XFRM_SHARE_USER:
1517 case XFRM_SHARE_UNIQUE:
1524 switch (p->action) {
1525 case XFRM_POLICY_ALLOW:
1526 case XFRM_POLICY_BLOCK:
1533 switch (p->sel.family) {
1535 if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
1541 #if IS_ENABLED(CONFIG_IPV6)
1542 if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
1547 return -EAFNOSUPPORT;
1554 ret = verify_policy_dir(p->dir);
1557 if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
1563 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
1565 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1566 struct xfrm_user_sec_ctx *uctx;
1571 uctx = nla_data(rt);
1572 return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL);
1575 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
1581 for (i = 0; i < nr; i++, ut++) {
1582 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1584 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
1585 memcpy(&t->saddr, &ut->saddr,
1586 sizeof(xfrm_address_t));
1587 t->reqid = ut->reqid;
1589 t->share = ut->share;
1590 t->optional = ut->optional;
1591 t->aalgos = ut->aalgos;
1592 t->ealgos = ut->ealgos;
1593 t->calgos = ut->calgos;
1594 /* If all masks are ~0, then we allow all algorithms. */
1595 t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
1596 t->encap_family = ut->family;
1600 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1605 if (nr > XFRM_MAX_DEPTH)
1608 prev_family = family;
1610 for (i = 0; i < nr; i++) {
1611 /* We never validated the ut->family value, so many
1612 * applications simply leave it at zero. The check was
1613 * never made and ut->family was ignored because all
1614 * templates could be assumed to have the same family as
1615 * the policy itself. Now that we will have ipv4-in-ipv6
1616 * and ipv6-in-ipv4 tunnels, this is no longer true.
1619 ut[i].family = family;
1621 switch (ut[i].mode) {
1622 case XFRM_MODE_TUNNEL:
1623 case XFRM_MODE_BEET:
1626 if (ut[i].family != prev_family)
1630 if (ut[i].mode >= XFRM_MODE_MAX)
1633 prev_family = ut[i].family;
1635 switch (ut[i].family) {
1638 #if IS_ENABLED(CONFIG_IPV6)
1646 if (!xfrm_id_proto_valid(ut[i].id.proto))
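/* validate_tmpl() caps a policy at XFRM_MAX_DEPTH templates, lets the
 * address family change only across tunnel/BEET templates (where an inner
 * family may legitimately differ), and rejects protocols that are not
 * valid IPsec identifiers. */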
1653 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
1655 struct nlattr *rt = attrs[XFRMA_TMPL];
1660 struct xfrm_user_tmpl *utmpl = nla_data(rt);
1661 int nr = nla_len(rt) / sizeof(*utmpl);
1664 err = validate_tmpl(nr, utmpl, pol->family);
1668 copy_templates(pol, utmpl, nr);
1673 static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
1675 struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
1676 struct xfrm_userpolicy_type *upt;
1677 u8 type = XFRM_POLICY_TYPE_MAIN;
1685 err = verify_policy_type(type);
1693 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
1695 xp->priority = p->priority;
1696 xp->index = p->index;
1697 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
1698 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
1699 xp->action = p->action;
1700 xp->flags = p->flags;
1701 xp->family = p->sel.family;
1702 /* XXX xp->share = p->share; */
1705 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
1707 memset(p, 0, sizeof(*p));
1708 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
1709 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
1710 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
1711 p->priority = xp->priority;
1712 p->index = xp->index;
1713 p->sel.family = xp->family;
1715 p->action = xp->action;
1716 p->flags = xp->flags;
1717 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
1720 static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
1722 struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
1730 copy_from_user_policy(xp, p);
1732 err = copy_from_user_policy_type(&xp->type, attrs);
1736 if (!(err = copy_from_user_tmpl(xp, attrs)))
1737 err = copy_from_user_sec_ctx(xp, attrs);
1741 xfrm_mark_get(attrs, &xp->mark);
1743 if (attrs[XFRMA_IF_ID])
1744 xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
1750 xfrm_policy_destroy(xp);
1754 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1755 struct nlattr **attrs)
1757 struct net *net = sock_net(skb->sk);
1758 struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
1759 struct xfrm_policy *xp;
1764 err = verify_newpolicy_info(p);
1767 err = verify_sec_ctx_len(attrs);
1771 xp = xfrm_policy_construct(net, p, attrs, &err);
1775 /* Shouldn't excl be based on nlh flags?
1776 * Aha! This is anti-netlink really, i.e. more pfkey-derived:
1777 * in netlink, excl is a flag and you wouldn't need
1778 * a separate XFRM_MSG_UPDPOLICY type. - JHS */
1779 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
1780 err = xfrm_policy_insert(p->dir, xp, excl);
1781 xfrm_audit_policy_add(xp, err ? 0 : 1, true);
1784 security_xfrm_policy_free(xp->security);
1789 c.event = nlh->nlmsg_type;
1790 c.seq = nlh->nlmsg_seq;
1791 c.portid = nlh->nlmsg_pid;
1792 km_policy_notify(xp, p->dir, &c);
1799 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
1801 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
1804 if (xp->xfrm_nr == 0)
1807 for (i = 0; i < xp->xfrm_nr; i++) {
1808 struct xfrm_user_tmpl *up = &vec[i];
1809 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
1811 memset(up, 0, sizeof(*up));
1812 memcpy(&up->id, &kp->id, sizeof(up->id));
1813 up->family = kp->encap_family;
1814 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
1815 up->reqid = kp->reqid;
1816 up->mode = kp->mode;
1817 up->share = kp->share;
1818 up->optional = kp->optional;
1819 up->aalgos = kp->aalgos;
1820 up->ealgos = kp->ealgos;
1821 up->calgos = kp->calgos;
1824 return nla_put(skb, XFRMA_TMPL,
1825 sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
1828 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
1831 return copy_sec_ctx(x->security, skb);
1836 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
1839 return copy_sec_ctx(xp->security, skb);
1842 static inline unsigned int userpolicy_type_attrsize(void)
1844 #ifdef CONFIG_XFRM_SUB_POLICY
1845 return nla_total_size(sizeof(struct xfrm_userpolicy_type));
1851 #ifdef CONFIG_XFRM_SUB_POLICY
1852 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1854 struct xfrm_userpolicy_type upt;
1856 /* Sadly there are two holes in struct xfrm_userpolicy_type */
1857 memset(&upt, 0, sizeof(upt));
1860 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
1864 static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1870 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
1872 struct xfrm_dump_info *sp = ptr;
1873 struct xfrm_userpolicy_info *p;
1874 struct sk_buff *in_skb = sp->in_skb;
1875 struct sk_buff *skb = sp->out_skb;
1876 struct xfrm_translator *xtr;
1877 struct nlmsghdr *nlh;
1880 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
1881 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
1885 p = nlmsg_data(nlh);
1886 copy_to_user_policy(xp, p, dir);
1887 err = copy_to_user_tmpl(xp, skb);
1889 err = copy_to_user_sec_ctx(xp, skb);
1891 err = copy_to_user_policy_type(xp->type, skb);
1893 err = xfrm_mark_put(skb, &xp->mark);
1895 err = xfrm_if_id_put(skb, xp->if_id);
1897 nlmsg_cancel(skb, nlh);
1900 nlmsg_end(skb, nlh);
1902 xtr = xfrm_get_translator();
1904 err = xtr->alloc_compat(skb, nlh);
1906 xfrm_put_translator(xtr);
1908 nlmsg_cancel(skb, nlh);
1916 static int xfrm_dump_policy_done(struct netlink_callback *cb)
1918 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
1919 struct net *net = sock_net(cb->skb->sk);
1921 xfrm_policy_walk_done(walk, net);
1925 static int xfrm_dump_policy_start(struct netlink_callback *cb)
1927 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
1929 BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
1931 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
1935 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1937 struct net *net = sock_net(skb->sk);
1938 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
1939 struct xfrm_dump_info info;
1941 info.in_skb = cb->skb;
1943 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1944 info.nlmsg_flags = NLM_F_MULTI;
1946 (void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
1951 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
1952 struct xfrm_policy *xp,
1955 struct xfrm_dump_info info;
1956 struct sk_buff *skb;
1959 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1961 return ERR_PTR(-ENOMEM);
1963 info.in_skb = in_skb;
1965 info.nlmsg_seq = seq;
1966 info.nlmsg_flags = 0;
1968 err = dump_one_policy(xp, dir, 0, &info);
1971 return ERR_PTR(err);
1977 static int xfrm_notify_userpolicy(struct net *net)
1979 struct xfrm_userpolicy_default *up;
1980 int len = NLMSG_ALIGN(sizeof(*up));
1981 struct nlmsghdr *nlh;
1982 struct sk_buff *skb;
1985 skb = nlmsg_new(len, GFP_ATOMIC);
1989 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_GETDEFAULT, sizeof(*up), 0);
1995 up = nlmsg_data(nlh);
1996 up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
1997 up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
1998 up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];
2000 nlmsg_end(skb, nlh);
2003 err = xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
2009 static bool xfrm_userpolicy_is_valid(__u8 policy)
2011 return policy == XFRM_USERPOLICY_BLOCK ||
2012 policy == XFRM_USERPOLICY_ACCEPT;
2015 static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh,
2016 struct nlattr **attrs)
2018 struct net *net = sock_net(skb->sk);
2019 struct xfrm_userpolicy_default *up = nlmsg_data(nlh);
2021 if (xfrm_userpolicy_is_valid(up->in))
2022 net->xfrm.policy_default[XFRM_POLICY_IN] = up->in;
2024 if (xfrm_userpolicy_is_valid(up->fwd))
2025 net->xfrm.policy_default[XFRM_POLICY_FWD] = up->fwd;
2027 if (xfrm_userpolicy_is_valid(up->out))
2028 net->xfrm.policy_default[XFRM_POLICY_OUT] = up->out;
2030 rt_genid_bump_all(net);
2032 xfrm_notify_userpolicy(net);
2036 static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
2037 struct nlattr **attrs)
2039 struct sk_buff *r_skb;
2040 struct nlmsghdr *r_nlh;
2041 struct net *net = sock_net(skb->sk);
2042 struct xfrm_userpolicy_default *r_up;
2043 int len = NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_default));
2044 u32 portid = NETLINK_CB(skb).portid;
2045 u32 seq = nlh->nlmsg_seq;
2047 r_skb = nlmsg_new(len, GFP_ATOMIC);
2051 r_nlh = nlmsg_put(r_skb, portid, seq, XFRM_MSG_GETDEFAULT, sizeof(*r_up), 0);
2057 r_up = nlmsg_data(r_nlh);
2058 r_up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
2059 r_up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
2060 r_up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];
2061 nlmsg_end(r_skb, r_nlh);
2063 return nlmsg_unicast(net->xfrm.nlsk, r_skb, portid);
2066 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
2067 struct nlattr **attrs)
2069 struct net *net = sock_net(skb->sk);
2070 struct xfrm_policy *xp;
2071 struct xfrm_userpolicy_id *p;
2072 u8 type = XFRM_POLICY_TYPE_MAIN;
2079 p = nlmsg_data(nlh);
2080 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
2082 err = copy_from_user_policy_type(&type, attrs);
2086 err = verify_policy_dir(p->dir);
2090 if (attrs[XFRMA_IF_ID])
2091 if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
2093 xfrm_mark_get(attrs, &m);
2096 xp = xfrm_policy_byid(net, &m, if_id, type, p->dir,
2097 p->index, delete, &err);
2099 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
2100 struct xfrm_sec_ctx *ctx;
2102 err = verify_sec_ctx_len(attrs);
2108 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
2110 err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
2114 xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
2115 &p->sel, ctx, delete, &err);
2116 security_xfrm_policy_free(ctx);
2122 struct sk_buff *resp_skb;
2124 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
2125 if (IS_ERR(resp_skb)) {
2126 err = PTR_ERR(resp_skb);
2128 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
2129 NETLINK_CB(skb).portid);
2132 xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
2137 c.data.byid = p->index;
2138 c.event = nlh->nlmsg_type;
2139 c.seq = nlh->nlmsg_seq;
2140 c.portid = nlh->nlmsg_pid;
2141 km_policy_notify(xp, p->dir, &c);
2149 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
2150 struct nlattr **attrs)
2152 struct net *net = sock_net(skb->sk);
2154 struct xfrm_usersa_flush *p = nlmsg_data(nlh);
2157 err = xfrm_state_flush(net, p->proto, true, false);
2159 if (err == -ESRCH) /* empty table */
2163 c.data.proto = p->proto;
2164 c.event = nlh->nlmsg_type;
2165 c.seq = nlh->nlmsg_seq;
2166 c.portid = nlh->nlmsg_pid;
2168 km_state_notify(NULL, &c);
2173 static inline unsigned int xfrm_aevent_msgsize(struct xfrm_state *x)
2175 unsigned int replay_size = x->replay_esn ?
2176 xfrm_replay_state_esn_len(x->replay_esn) :
2177 sizeof(struct xfrm_replay_state);
2179 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
2180 + nla_total_size(replay_size)
2181 + nla_total_size_64bit(sizeof(struct xfrm_lifetime_cur))
2182 + nla_total_size(sizeof(struct xfrm_mark))
2183 + nla_total_size(4) /* XFRM_AE_RTHR */
2184 + nla_total_size(4); /* XFRM_AE_ETHR */
2187 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
2189 struct xfrm_aevent_id *id;
2190 struct nlmsghdr *nlh;
2193 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
2197 id = nlmsg_data(nlh);
2198 memset(&id->sa_id, 0, sizeof(id->sa_id));
2199 memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
2200 id->sa_id.spi = x->id.spi;
2201 id->sa_id.family = x->props.family;
2202 id->sa_id.proto = x->id.proto;
2203 memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
2204 id->reqid = x->props.reqid;
2205 id->flags = c->data.aevent;
2207 if (x->replay_esn) {
2208 err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
2209 xfrm_replay_state_esn_len(x->replay_esn),
2212 err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
2217 err = nla_put_64bit(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft,
2222 if (id->flags & XFRM_AE_RTHR) {
2223 err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
2227 if (id->flags & XFRM_AE_ETHR) {
2228 err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
2229 x->replay_maxage * 10 / HZ);
2233 err = xfrm_mark_put(skb, &x->mark);
2237 err = xfrm_if_id_put(skb, x->if_id);
2241 nlmsg_end(skb, nlh);
2245 nlmsg_cancel(skb, nlh);
2249 static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
2250 struct nlattr **attrs)
2252 struct net *net = sock_net(skb->sk);
2253 struct xfrm_state *x;
2254 struct sk_buff *r_skb;
2259 struct xfrm_aevent_id *p = nlmsg_data(nlh);
2260 struct xfrm_usersa_id *id = &p->sa_id;
2262 mark = xfrm_mark_get(attrs, &m);
2264 x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
2268 r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
2269 if (r_skb == NULL) {
2275 * XXX: Is this lock really needed? None of the other
2276 * gets takes the lock (the concern is things getting
2277 * updated while we are still reading). - jhs
2279 spin_lock_bh(&x->lock);
2280 c.data.aevent = p->flags;
2281 c.seq = nlh->nlmsg_seq;
2282 c.portid = nlh->nlmsg_pid;
2284 err = build_aevent(r_skb, x, &c);
2287 err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid);
2288 spin_unlock_bh(&x->lock);
2293 static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
2294 struct nlattr **attrs)
2296 struct net *net = sock_net(skb->sk);
2297 struct xfrm_state *x;
2302 struct xfrm_aevent_id *p = nlmsg_data(nlh);
2303 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
2304 struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
2305 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
2306 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
2307 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
2309 if (!lt && !rp && !re && !et && !rt)
2312 /* pedantic mode - thou shalt sayeth replaceth */
2313 if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
2316 mark = xfrm_mark_get(attrs, &m);
2318 x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
2322 if (x->km.state != XFRM_STATE_VALID)
2325 err = xfrm_replay_verify_len(x->replay_esn, re);
2329 spin_lock_bh(&x->lock);
2330 xfrm_update_ae_params(x, attrs, 1);
2331 spin_unlock_bh(&x->lock);
2333 c.event = nlh->nlmsg_type;
2334 c.seq = nlh->nlmsg_seq;
2335 c.portid = nlh->nlmsg_pid;
2336 c.data.aevent = XFRM_AE_CU;
2337 km_state_notify(x, &c);
2344 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
2345 struct nlattr **attrs)
2347 struct net *net = sock_net(skb->sk);
2349 u8 type = XFRM_POLICY_TYPE_MAIN;
2352 err = copy_from_user_policy_type(&type, attrs);
2356 err = xfrm_policy_flush(net, type, true);
2358 if (err == -ESRCH) /* empty table */
2364 c.event = nlh->nlmsg_type;
2365 c.seq = nlh->nlmsg_seq;
2366 c.portid = nlh->nlmsg_pid;
2368 km_policy_notify(NULL, 0, &c);
2372 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
2373 struct nlattr **attrs)
2375 struct net *net = sock_net(skb->sk);
2376 struct xfrm_policy *xp;
2377 struct xfrm_user_polexpire *up = nlmsg_data(nlh);
2378 struct xfrm_userpolicy_info *p = &up->pol;
2379 u8 type = XFRM_POLICY_TYPE_MAIN;
2384 err = copy_from_user_policy_type(&type, attrs);
2388 err = verify_policy_dir(p->dir);
2392 if (attrs[XFRMA_IF_ID])
2393 if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
2395 xfrm_mark_get(attrs, &m);
2398 xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index,
2401 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
2402 struct xfrm_sec_ctx *ctx;
2404 err = verify_sec_ctx_len(attrs);
2410 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
2412 err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
2416 xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
2417 &p->sel, ctx, 0, &err);
2418 security_xfrm_policy_free(ctx);
2423 if (unlikely(xp->walk.dead))
2428 xfrm_policy_delete(xp, p->dir);
2429 xfrm_audit_policy_delete(xp, 1, true);
2431 km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
2438 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
2439 struct nlattr **attrs)
2441 struct net *net = sock_net(skb->sk);
2442 struct xfrm_state *x;
2444 struct xfrm_user_expire *ue = nlmsg_data(nlh);
2445 struct xfrm_usersa_info *p = &ue->state;
2447 u32 mark = xfrm_mark_get(attrs, &m);
2449 x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
2455 spin_lock_bh(&x->lock);
2457 if (x->km.state != XFRM_STATE_VALID)
2459 km_state_expired(x, ue->hard, nlh->nlmsg_pid);
2462 __xfrm_state_delete(x);
2463 xfrm_audit_state_delete(x, 1, true);
2467 spin_unlock_bh(&x->lock);
2472 static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
2473 struct nlattr **attrs)
2475 struct net *net = sock_net(skb->sk);
2476 struct xfrm_policy *xp;
2477 struct xfrm_user_tmpl *ut;
2479 struct nlattr *rt = attrs[XFRMA_TMPL];
2480 struct xfrm_mark mark;
2482 struct xfrm_user_acquire *ua = nlmsg_data(nlh);
2483 struct xfrm_state *x = xfrm_state_alloc(net);
2489 xfrm_mark_get(attrs, &mark);
2491 err = verify_newpolicy_info(&ua->policy);
2494 err = verify_sec_ctx_len(attrs);
2499 xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
2503 memcpy(&x->id, &ua->id, sizeof(ua->id));
2504 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
2505 memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
2506 xp->mark.m = x->mark.m = mark.m;
2507 xp->mark.v = x->mark.v = mark.v;
2509 /* extract the templates and, for each one, call km_query() */
2510 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
2511 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
2512 memcpy(&x->id, &t->id, sizeof(x->id));
2513 x->props.mode = t->mode;
2514 x->props.reqid = t->reqid;
2515 x->props.family = ut->family;
2516 t->aalgos = ua->aalgos;
2517 t->ealgos = ua->ealgos;
2518 t->calgos = ua->calgos;
2519 err = km_query(x, t, xp);
2534 #ifdef CONFIG_XFRM_MIGRATE
2535 static int copy_from_user_migrate(struct xfrm_migrate *ma,
2536 struct xfrm_kmaddress *k,
2537 struct nlattr **attrs, int *num)
2539 struct nlattr *rt = attrs[XFRMA_MIGRATE];
2540 struct xfrm_user_migrate *um;
2544 struct xfrm_user_kmaddress *uk;
2546 uk = nla_data(attrs[XFRMA_KMADDRESS]);
2547 memcpy(&k->local, &uk->local, sizeof(k->local));
2548 memcpy(&k->remote, &uk->remote, sizeof(k->remote));
2549 k->family = uk->family;
2550 k->reserved = uk->reserved;
2554 num_migrate = nla_len(rt) / sizeof(*um);
2556 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
2559 for (i = 0; i < num_migrate; i++, um++, ma++) {
2560 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
2561 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
2562 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
2563 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
2565 ma->proto = um->proto;
2566 ma->mode = um->mode;
2567 ma->reqid = um->reqid;
2569 ma->old_family = um->old_family;
2570 ma->new_family = um->new_family;
2577 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
2578 struct nlattr **attrs)
2580 struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
2581 struct xfrm_migrate m[XFRM_MAX_DEPTH];
2582 struct xfrm_kmaddress km, *kmp;
2586 struct net *net = sock_net(skb->sk);
2587 struct xfrm_encap_tmpl *encap = NULL;
2590 if (attrs[XFRMA_MIGRATE] == NULL)
2593 kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;
2595 err = copy_from_user_policy_type(&type, attrs);
2599 err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
2606 if (attrs[XFRMA_ENCAP]) {
2607 encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
2608 sizeof(*encap), GFP_KERNEL);
2613 if (attrs[XFRMA_IF_ID])
2614 if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
2616 err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap, if_id);
2623 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
2624 struct nlattr **attrs)
2626 return -ENOPROTOOPT;
2630 #ifdef CONFIG_XFRM_MIGRATE
2631 static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
2633 struct xfrm_user_migrate um;
2635 memset(&um, 0, sizeof(um));
2636 um.proto = m->proto;
2638 um.reqid = m->reqid;
2639 um.old_family = m->old_family;
2640 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
2641 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
2642 um.new_family = m->new_family;
2643 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
2644 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
2646 return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
2649 static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
2651 struct xfrm_user_kmaddress uk;
2653 memset(&uk, 0, sizeof(uk));
2654 uk.family = k->family;
2655 uk.reserved = k->reserved;
2656 memcpy(&uk.local, &k->local, sizeof(uk.local));
2657 memcpy(&uk.remote, &k->remote, sizeof(uk.remote));
2659 return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
2662 static inline unsigned int xfrm_migrate_msgsize(int num_migrate, int with_kma,
2665 return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
2666 + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
2667 + (with_encp ? nla_total_size(sizeof(struct xfrm_encap_tmpl)) : 0)
2668 + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
2669 + userpolicy_type_attrsize();
2672 static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
2673 int num_migrate, const struct xfrm_kmaddress *k,
2674 const struct xfrm_selector *sel,
2675 const struct xfrm_encap_tmpl *encap, u8 dir, u8 type)
2677 const struct xfrm_migrate *mp;
2678 struct xfrm_userpolicy_id *pol_id;
2679 struct nlmsghdr *nlh;
2682 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
2686 pol_id = nlmsg_data(nlh);
2687 /* copy data from selector, dir, and type to the pol_id */
2688 memset(pol_id, 0, sizeof(*pol_id));
2689 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
2693 err = copy_to_user_kmaddress(k, skb);
2698 err = nla_put(skb, XFRMA_ENCAP, sizeof(*encap), encap);
2702 err = copy_to_user_policy_type(type, skb);
2705 for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
2706 err = copy_to_user_migrate(mp, skb);
2711 nlmsg_end(skb, nlh);
2715 nlmsg_cancel(skb, nlh);
2719 static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2720 const struct xfrm_migrate *m, int num_migrate,
2721 const struct xfrm_kmaddress *k,
2722 const struct xfrm_encap_tmpl *encap)
2724 struct net *net = &init_net;
2725 struct sk_buff *skb;
2728 skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k, !!encap),
2734 err = build_migrate(skb, m, num_migrate, k, sel, encap, dir, type);
2737 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MIGRATE);
2740 static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2741 const struct xfrm_migrate *m, int num_migrate,
2742 const struct xfrm_kmaddress *k,
2743 const struct xfrm_encap_tmpl *encap)
2745 return -ENOPROTOOPT;
2749 #define XMSGSIZE(type) sizeof(struct type)
2751 const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
2752 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
2753 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
2754 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
2755 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
2756 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2757 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2758 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
2759 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
2760 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
2761 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
2762 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
2763 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
2764 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
2765 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
2766 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
2767 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
2768 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
2769 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2770 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
2771 [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
2772 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
2773 [XFRM_MSG_SETDEFAULT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default),
2774 [XFRM_MSG_GETDEFAULT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default),
2776 EXPORT_SYMBOL_GPL(xfrm_msg_min);
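/* In xfrma_policy below, entries declared with .len use it as a minimum
 * payload length; variable-size attributes such as XFRMA_REPLAY_ESN_VAL
 * are allowed to be longer than the base structure because of trailing
 * data like the replay bitmap. */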
2780 const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
2781 [XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
2782 [XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
2783 [XFRMA_LASTUSED] = { .type = NLA_U64},
2784 [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
2785 [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
2786 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
2787 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
2788 [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
2789 [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
2790 [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
2791 [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
2792 [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
2793 [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
2794 [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
2795 [XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
2796 [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
2797 [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
2798 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
2799 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
2800 [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
2801 [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
2802 [XFRMA_TFCPAD] = { .type = NLA_U32 },
2803 [XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) },
2804 [XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 },
2805 [XFRMA_PROTO] = { .type = NLA_U8 },
2806 [XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) },
2807 [XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) },
2808 [XFRMA_SET_MARK] = { .type = NLA_U32 },
2809 [XFRMA_SET_MARK_MASK] = { .type = NLA_U32 },
2810 [XFRMA_IF_ID] = { .type = NLA_U32 },
2812 EXPORT_SYMBOL_GPL(xfrma_policy);
2814 static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
2815 [XFRMA_SPD_IPV4_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
2816 [XFRMA_SPD_IPV6_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
static const struct xfrm_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
	int (*start)(struct netlink_callback *);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
	const struct nla_policy *nla_pol;
	int nla_max;
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa        },
	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = { .doit = xfrm_del_sa        },
	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
						   .dump = xfrm_dump_sa,
						   .done = xfrm_dump_sa_done  },
	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy    },
	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
						   .start = xfrm_dump_policy_start,
						   .dump = xfrm_dump_policy,
						   .done = xfrm_dump_policy_done },
	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire   },
	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa        },
	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa      },
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy  },
	[XFRM_MSG_NEWAE       - XFRM_MSG_BASE] = { .doit = xfrm_new_ae        },
	[XFRM_MSG_GETAE       - XFRM_MSG_BASE] = { .doit = xfrm_get_ae        },
	[XFRM_MSG_MIGRATE     - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate    },
	[XFRM_MSG_GETSADINFO  - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo   },
	[XFRM_MSG_NEWSPDINFO  - XFRM_MSG_BASE] = { .doit = xfrm_set_spdinfo,
						   .nla_pol = xfrma_spd_policy,
						   .nla_max = XFRMA_SPD_MAX },
	[XFRM_MSG_GETSPDINFO  - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo   },
	[XFRM_MSG_SETDEFAULT  - XFRM_MSG_BASE] = { .doit = xfrm_set_default   },
	[XFRM_MSG_GETDEFAULT  - XFRM_MSG_BASE] = { .doit = xfrm_get_default   },
};

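/* Top-level receive handler.  Every message requires CAP_NET_ADMIN,
 * 32-bit clients are rewritten by the optional xfrm compat translator,
 * and GETSA/GETPOLICY requests with NLM_F_DUMP are diverted to the dump
 * machinery before any attribute parsing happens.
 *
 * Illustrative userspace sketch (not kernel code; the payload struct is
 * an assumption based on xfrm_msg_min[] and may differ per message type):
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
 *	struct {
 *		struct nlmsghdr n;
 *		struct xfrm_usersa_id id;	// payload for XFRM_MSG_GETSA
 *	} req = {
 *		.n.nlmsg_len   = NLMSG_LENGTH(sizeof(req.id)),
 *		.n.nlmsg_type  = XFRM_MSG_GETSA,
 *		.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *	};
 *	send(fd, &req, req.n.nlmsg_len, 0);	// needs CAP_NET_ADMIN
 */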
static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *attrs[XFRMA_MAX+1];
	const struct xfrm_link *link;
	struct nlmsghdr *nlh64 = NULL;
	int type, err;

	type = nlh->nlmsg_type;
	if (type > XFRM_MSG_MAX)
		return -EINVAL;
	type -= XFRM_MSG_BASE;
	link = &xfrm_dispatch[type];

	/* All operations require privileges, even GET */
	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (in_compat_syscall()) {
		struct xfrm_translator *xtr = xfrm_get_translator();

		if (!xtr)
			return -EOPNOTSUPP;
		nlh64 = xtr->rcv_msg_compat(nlh, link->nla_max,
					    link->nla_pol, extack);
		xfrm_put_translator(xtr);
		if (IS_ERR(nlh64))
			return PTR_ERR(nlh64);
		if (nlh64)
			nlh = nlh64;
	}

	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
		struct netlink_dump_control c = {
			.start = link->start,
			.dump = link->dump,
			.done = link->done,
		};

		if (link->dump == NULL) {
			err = -EINVAL;
			goto err;
		}
		err = netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
		goto err;
	}

	err = nlmsg_parse_deprecated(nlh, xfrm_msg_min[type], attrs,
				     link->nla_max ? : XFRMA_MAX,
				     link->nla_pol ? : xfrma_policy, extack);
	if (err < 0)
		goto err;

	if (link->doit == NULL) {
		err = -EINVAL;
		goto err;
	}

	err = link->doit(skb, nlh, attrs);

	/* We need to free skb allocated in xfrm_alloc_compat() before
	 * returning from this function, because consume_skb() won't take
	 * care of frag_list since netlink destructor sets
	 * skb->head to NULL. (see netlink_skb_destructor())
	 */
	if (skb_has_frag_list(skb)) {
		kfree_skb(skb_shinfo(skb)->frag_list);
		skb_shinfo(skb)->frag_list = NULL;
	}

err:
	kvfree(nlh64);
	return err;
}

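/* All configuration messages are serialized per namespace under
 * xfrm_cfg_mutex, so the doit handlers never run concurrently for the
 * same netns.
 */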
static void xfrm_netlink_rcv(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);

	mutex_lock(&net->xfrm.xfrm_cfg_mutex);
	netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
	mutex_unlock(&net->xfrm.xfrm_cfg_mutex);
}

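/* The notification helpers below follow one pattern: a *_msgsize() helper
 * computes an upper bound for the message, build_*() fills the skb, and a
 * thin wrapper multicasts the result to the matching XFRMNLGRP_* group.
 */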
static inline unsigned int xfrm_expire_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
	       + nla_total_size(sizeof(struct xfrm_mark));
}

static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_user_expire *ue;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ue = nlmsg_data(nlh);
	copy_to_user_state(x, &ue->state);
	ue->hard = (c->data.hard != 0) ? 1 : 0;
	/* clear the padding bytes so uninitialized kernel memory
	 * never leaks to the multicast listeners
	 */
	memset(&ue->hard + 1, 0, sizeof(*ue) - offsetofend(typeof(*ue), hard));

	err = xfrm_mark_put(skb, &x->mark);
	if (err)
		return err;
	err = xfrm_if_id_put(skb, x->if_id);
	if (err)
		return err;

	nlmsg_end(skb, nlh);
	return 0;
}

static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;
	if (build_expire(skb, x, c) < 0) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}
	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
}

static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;
	err = build_aevent(skb, x, c);
	BUG_ON(err < 0);
	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_AEVENTS);
}

static int xfrm_notify_sa_flush(const struct km_event *c)
{
	struct net *net = c->net;
	struct xfrm_usersa_flush *p;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;
	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
	if (nlh == NULL) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	p = nlmsg_data(nlh);
	p->proto = c->data.proto;
	nlmsg_end(skb, nlh);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
}

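/* Upper bound on the attribute space copy_to_user_state_extra() may emit
 * for this state; the two must stay in sync, or the nla_put() calls in
 * xfrm_notify_sa() can run out of room and fail with -EMSGSIZE.
 */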
static inline unsigned int xfrm_sa_len(struct xfrm_state *x)
{
	unsigned int l = 0;

	if (x->aead)
		l += nla_total_size(aead_len(x->aead));
	if (x->aalg) {
		l += nla_total_size(sizeof(struct xfrm_algo) +
				    (x->aalg->alg_key_len + 7) / 8);
		l += nla_total_size(xfrm_alg_auth_len(x->aalg));
	}
	if (x->ealg)
		l += nla_total_size(xfrm_alg_len(x->ealg));
	if (x->calg)
		l += nla_total_size(sizeof(*x->calg));
	if (x->encap)
		l += nla_total_size(sizeof(*x->encap));
	if (x->tfcpad)
		l += nla_total_size(sizeof(x->tfcpad));
	if (x->replay_esn)
		l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
	else
		l += nla_total_size(sizeof(struct xfrm_replay_state));
	if (x->security)
		l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
				    x->security->ctx_len);
	if (x->coaddr)
		l += nla_total_size(sizeof(*x->coaddr));
	if (x->props.extra_flags)
		l += nla_total_size(sizeof(x->props.extra_flags));
	if (x->xso.dev)
		l += nla_total_size(sizeof(struct xfrm_user_offload));
	if (x->props.smark.v | x->props.smark.m) {
		l += nla_total_size(sizeof(x->props.smark.v));
		l += nla_total_size(sizeof(x->props.smark.m));
	}
	if (x->if_id)
		l += nla_total_size(sizeof(x->if_id));

	/* Must count x->lastused as it may become non-zero behind our back. */
	l += nla_total_size_64bit(sizeof(u64));

	if (x->mapping_maxage)
		l += nla_total_size(sizeof(x->mapping_maxage));

	return l;
}

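/* NEWSA/UPDSA carry a bare xfrm_usersa_info header.  DELSA instead uses
 * xfrm_usersa_id as the header and nests the full state in an XFRMA_SA
 * attribute, which is why header length and total length are recomputed
 * below for that event.
 */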
static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct xfrm_usersa_info *p;
	struct xfrm_usersa_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	unsigned int len = xfrm_sa_len(x);
	unsigned int headlen;
	int err;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELSA) {
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
		len += nla_total_size(sizeof(struct xfrm_mark));
	}
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
	err = -EMSGSIZE;
	if (nlh == NULL)
		goto out_free_skb;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELSA) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memset(id, 0, sizeof(*id));
		memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
		id->spi = x->id.spi;
		id->family = x->props.family;
		id->proto = x->id.proto;

		attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
		err = -EMSGSIZE;
		if (attr == NULL)
			goto out_free_skb;
		p = nla_data(attr);
	}
	err = copy_to_user_state_extra(x, p, skb);
	if (err)
		goto out_free_skb;

	nlmsg_end(skb, nlh);
	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);

out_free_skb:
	kfree_skb(skb);
	return err;
}

static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	switch (c->event) {
	case XFRM_MSG_EXPIRE:
		return xfrm_exp_state_notify(x, c);
	case XFRM_MSG_NEWAE:
		return xfrm_aevent_state_notify(x, c);
	case XFRM_MSG_DELSA:
	case XFRM_MSG_UPDSA:
	case XFRM_MSG_NEWSA:
		return xfrm_notify_sa(x, c);
	case XFRM_MSG_FLUSHSA:
		return xfrm_notify_sa_flush(c);
	default:
		printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
		       c->event);
		break;
	}

	return 0;
}

static inline unsigned int xfrm_acquire_msgsize(struct xfrm_state *x,
						struct xfrm_policy *xp)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
	       + nla_total_size(sizeof(struct xfrm_mark))
	       + nla_total_size(xfrm_user_sec_ctx_size(x->security))
	       + userpolicy_type_attrsize();
}

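/* An ACQUIRE is multicast when the kernel needs a new SA for a matching
 * policy; a userspace keying daemon (e.g. an IKE implementation) is
 * expected to negotiate keys and answer with XFRM_MSG_NEWSA.
 */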
static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
			 struct xfrm_tmpl *xt, struct xfrm_policy *xp)
{
	__u32 seq = xfrm_get_acqseq();
	struct xfrm_user_acquire *ua;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ua = nlmsg_data(nlh);
	memcpy(&ua->id, &x->id, sizeof(ua->id));
	memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
	memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
	copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT);
	ua->aalgos = xt->aalgos;
	ua->ealgos = xt->ealgos;
	ua->calgos = xt->calgos;
	ua->seq = x->km.seq = seq;

	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_state_sec_ctx(x, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (!err)
		err = xfrm_if_id_put(skb, xp->if_id);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	nlmsg_end(skb, nlh);
	return 0;
}

static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
			     struct xfrm_policy *xp)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;
	err = build_acquire(skb, x, xt, xp);
	BUG_ON(err < 0);
	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_ACQUIRE);
}

/* User gives us xfrm_user_policy_info followed by an array of 0
 * or more templates. */
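/* Sketch of the setsockopt() payload parsed below, assuming nr templates:
 *
 *	struct xfrm_userpolicy_info p;
 *	struct xfrm_user_tmpl       tmpl[nr];
 *
 * nr = (len - sizeof(p)) / sizeof(tmpl[0]); trailing bytes that do not
 * form a whole template are simply ignored by the integer division.
 */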
static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
					       u8 *data, int len, int *dir)
{
	struct net *net = sock_net(sk);
	struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
	struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
	struct xfrm_policy *xp;
	int nr;

	switch (sk->sk_family) {
	case AF_INET:
		if (opt != IP_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (opt != IPV6_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#endif
	default:
		*dir = -EINVAL;
		return NULL;
	}

	*dir = -EINVAL;
	if (len < sizeof(*p) ||
	    verify_newpolicy_info(p))
		return NULL;

	nr = ((len - sizeof(*p)) / sizeof(*ut));
	if (validate_tmpl(nr, ut, p->sel.family))
		return NULL;
	if (p->dir > XFRM_POLICY_OUT)
		return NULL;

	xp = xfrm_policy_alloc(net, GFP_ATOMIC);
	if (xp == NULL) {
		*dir = -ENOBUFS;
		return NULL;
	}

	copy_from_user_policy(xp, p);
	xp->type = XFRM_POLICY_TYPE_MAIN;
	copy_templates(xp, ut, nr);
	*dir = p->dir;
	return xp;
}

static inline unsigned int xfrm_polexpire_msgsize(struct xfrm_policy *xp)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
	       + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
	       + nla_total_size(sizeof(struct xfrm_mark))
	       + userpolicy_type_attrsize();
}

static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
			   int dir, const struct km_event *c)
{
	struct xfrm_user_polexpire *upe;
	int hard = c->data.hard;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	upe = nlmsg_data(nlh);
	copy_to_user_policy(xp, &upe->pol, dir);
	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_sec_ctx(xp, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (!err)
		err = xfrm_if_id_put(skb, xp->if_id);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}
	upe->hard = !!hard;

	nlmsg_end(skb, nlh);
	return 0;
}

static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
	struct net *net = xp_net(xp);
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;
	err = build_polexpire(skb, xp, dir, c);
	BUG_ON(err < 0);
	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
}

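/* Mirrors xfrm_notify_sa(): DELPOLICY uses xfrm_userpolicy_id as the
 * header and nests the full policy in an XFRMA_POLICY attribute, while
 * NEWPOLICY/UPDPOLICY send xfrm_userpolicy_info directly.
 */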
static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
	unsigned int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
	struct net *net = xp_net(xp);
	struct xfrm_userpolicy_info *p;
	struct xfrm_userpolicy_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	unsigned int headlen;
	int err;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELPOLICY) {
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
	}
	len += userpolicy_type_attrsize();
	len += nla_total_size(sizeof(struct xfrm_mark));
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
	err = -EMSGSIZE;
	if (nlh == NULL)
		goto out_free_skb;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELPOLICY) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memset(id, 0, sizeof(*id));
		id->dir = dir;
		if (c->data.id)
			id->index = xp->index;
		else
			memcpy(&id->sel, &xp->selector, sizeof(id->sel));

		attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
		err = -EMSGSIZE;
		if (attr == NULL)
			goto out_free_skb;
		p = nla_data(attr);
	}

	copy_to_user_policy(xp, p, dir);
	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (!err)
		err = xfrm_if_id_put(skb, xp->if_id);
	if (err)
		goto out_free_skb;

	nlmsg_end(skb, nlh);
	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);

out_free_skb:
	kfree_skb(skb);
	return err;
}

static int xfrm_notify_policy_flush(const struct km_event *c)
{
	struct net *net = c->net;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err = -EMSGSIZE;

	skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;
	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
	if (nlh == NULL)
		goto out_free_skb;
	err = copy_to_user_policy_type(c->data.type, skb);
	if (err)
		goto out_free_skb;

	nlmsg_end(skb, nlh);
	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);

out_free_skb:
	kfree_skb(skb);
	return err;
}

static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
	switch (c->event) {
	case XFRM_MSG_NEWPOLICY:
	case XFRM_MSG_UPDPOLICY:
	case XFRM_MSG_DELPOLICY:
		return xfrm_notify_policy(xp, dir, c);
	case XFRM_MSG_FLUSHPOLICY:
		return xfrm_notify_policy_flush(c);
	case XFRM_MSG_POLEXPIRE:
		return xfrm_exp_policy_notify(xp, dir, c);
	default:
		printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
		       c->event);
	}

	return 0;
}

static inline unsigned int xfrm_report_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
}

static int build_report(struct sk_buff *skb, u8 proto,
			struct xfrm_selector *sel, xfrm_address_t *addr)
{
	struct xfrm_user_report *ur;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ur = nlmsg_data(nlh);
	ur->proto = proto;
	memcpy(&ur->sel, sel, sizeof(ur->sel));

	if (addr) {
		int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);

		if (err) {
			nlmsg_cancel(skb, nlh);
			return err;
		}
	}
	nlmsg_end(skb, nlh);
	return 0;
}

static int xfrm_send_report(struct net *net, u8 proto,
			    struct xfrm_selector *sel, xfrm_address_t *addr)
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;
	err = build_report(skb, proto, sel, addr);
	BUG_ON(err < 0);
	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_REPORT);
}

static inline unsigned int xfrm_mapping_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
}

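/* XFRM_MSG_MAPPING tells listeners that the NAT mapping (peer source
 * address and/or UDP encapsulation port) changed for an ESP state, so
 * keying daemons can update their view of the tunnel endpoint.
 */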
static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
			 xfrm_address_t *new_saddr, __be16 new_sport)
{
	struct xfrm_user_mapping *um;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	um = nlmsg_data(nlh);

	memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
	um->id.spi = x->id.spi;
	um->id.family = x->props.family;
	um->id.proto = x->id.proto;
	memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
	memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
	um->new_sport = new_sport;
	um->old_sport = x->encap->encap_sport;
	um->reqid = x->props.reqid;

	nlmsg_end(skb, nlh);
	return 0;
}

static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
			     __be16 sport)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;
	int err;

	if (x->id.proto != IPPROTO_ESP)
		return -EINVAL;
	if (!x->encap)
		return -EINVAL;

	skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;
	err = build_mapping(skb, x, ipaddr, sport);
	BUG_ON(err < 0);
	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MAPPING);
}

static bool xfrm_is_alive(const struct km_event *c)
{
	return (bool)xfrm_acquire_is_on(c->net);
}

static struct xfrm_mgr netlink_mgr = {
	.notify		= xfrm_send_state_notify,
	.acquire	= xfrm_send_acquire,
	.compile_policy	= xfrm_compile_policy,
	.notify_policy	= xfrm_send_policy_notify,
	.report		= xfrm_send_report,
	.migrate	= xfrm_send_migrate,
	.new_mapping	= xfrm_send_mapping,
	.is_alive	= xfrm_is_alive,
};

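/* Register this netlink front end as a key manager with the xfrm core;
 * the core invokes these callbacks for state/policy events and SA
 * acquisition.
 */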
static int __net_init xfrm_user_net_init(struct net *net)
{
	struct sock *nlsk;
	struct netlink_kernel_cfg cfg = {
		.groups	= XFRMNLGRP_MAX,
		.input	= xfrm_netlink_rcv,
	};

	nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg);
	if (nlsk == NULL)
		return -ENOMEM;
	net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
	rcu_assign_pointer(net->xfrm.nlsk, nlsk);
	return 0;
}

static void __net_exit xfrm_user_net_pre_exit(struct net *net)
{
	RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
}

static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list)
		netlink_kernel_release(net->xfrm.nlsk_stash);
}

static struct pernet_operations xfrm_user_net_ops = {
	.init	    = xfrm_user_net_init,
	.pre_exit   = xfrm_user_net_pre_exit,
	.exit_batch = xfrm_user_net_exit,
};

static int __init xfrm_user_init(void)
{
	int rv;

	printk(KERN_INFO "Initializing XFRM netlink socket\n");

	rv = register_pernet_subsys(&xfrm_user_net_ops);
	if (rv < 0)
		return rv;
	rv = xfrm_register_km(&netlink_mgr);
	if (rv < 0)
		unregister_pernet_subsys(&xfrm_user_net_ops);
	return rv;
}

static void __exit xfrm_user_exit(void)
{
	xfrm_unregister_km(&netlink_mgr);
	unregister_pernet_subsys(&xfrm_user_net_ops);
}

module_init(xfrm_user_init);
module_exit(xfrm_user_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);