// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>

#include <uapi/linux/if_macsec.h>

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6
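
/* Added note (not in the original source): without the optional SCI the
 * SecTAG plus its EtherType takes MACSEC_TAG_LEN + 2 = 8 octets on the
 * wire; with the SCI it takes 8 + MACSEC_SCI_LEN = 16. Together with the
 * default 16-octet ICV, MACsec adds 24 or 32 octets per frame.
 */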

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8  tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8  short_length:6,
		  unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8        unused:2,
	    short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)
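
/* Worked example (added for illustration): a tci_an octet of 0x2c decodes
 * as MACSEC_TCI_SC | MACSEC_TCI_CONFID with association number 0, i.e. an
 * encrypted frame that carries an explicit SCI on AN 0.
 */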

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))
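
/* Example (added for illustration): the halves of the PN space are told
 * apart by bit 31, so pn_same_half(0x7fffffff, 0x80000000) is false while
 * pn_same_half(0x80000000, 0xffffffff) is true.
 */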

struct gcm_iv_xpn {
	union {
		u8 short_secure_channel_id[4];
		ssci_t ssci;
	};
	__be64 pn;
} __packed;

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @dev_tracker: refcount tracker for @real_dev reference
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: pointer to the Generic Receive Offload cell
 * @offload: status of offloading on the MACsec device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	netdevice_tracker dev_tracker;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

/* Walk the ANs and return the first active, referenced RX SA, if any. */
static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
{
	struct macsec_rx_sa *sa = NULL;
	int an;

	for (an = 0; an < MACSEC_NUM_AN; an++) {
		sa = macsec_rxsa_get(rx_sc->sa[an]);
		if (sa)
			break;
	}
	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_XPN false
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))
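
/* Added note: the XPN replay window is capped at 2^30 - 1, presumably so
 * that window arithmetic on the lower 32 bits of the packet number can
 * never span more than one half of the 32-bit space (see pn_same_half()).
 */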

static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}

static sci_t make_sci(const u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}
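
/* Worked example (added for illustration): a station with MAC address
 * 00:11:22:33:44:55 using the end-station port 0x0001 gets the SCI
 * 00:11:22:33:44:55:00:01, i.e. the address followed by the port in
 * network byte order.
 */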

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}
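
/* Added note with example values: with the SCI present,
 * macsec_sectag_len() is 14, macsec_hdr_len() is 14 + ETH_HLEN = 28
 * (the AAD for encryption), and macsec_extra_len() is 14 + 2 = 16 (the
 * room pushed in front of the payload for the MACsec EtherType + SecTAG).
 */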

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
	    macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}

/* Checks if underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;
	else if (offload == MACSEC_OFFLOAD_MAC)
		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
		       macsec->real_dev->macsec_ops;

	return false;
}

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
		else if (offload == MACSEC_OFFLOAD_MAC)
			ctx->netdev = macsec->real_dev;
	}

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev->macsec_ops;
	else
		return macsec->real_dev->macsec_ops;
}

/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 * context device reference if provided.
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
					       struct macsec_context *ctx)
{
	if (!macsec_check_offload(macsec->offload, macsec))
		return NULL;

	return __macsec_get_ops(macsec->offload, macsec, ctx);
}

/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
	if (!h->packet_number && !xpn)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}
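
/* Example of a frame rejected here (added for illustration): a SecTAG
 * advertising short_length 20 while the secure data actually spans 30
 * octets fails check f), since len != extra_len + short_length.
 */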

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
			       salt_t salt)
{
	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;

	gcm_iv->ssci = ssci ^ salt.ssci;
	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
}

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static void __macsec_pn_wrapped(struct macsec_secy *secy,
				struct macsec_tx_sa *tx_sa)
{
	pr_debug("PN wrapped, transitioning to !oper\n");
	tx_sa->active = false;
	if (secy->protect_frames)
		secy->operational = false;
}

void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
{
	spin_lock_bh(&tx_sa->lock);
	__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
			    struct macsec_secy *secy)
{
	pn_t pn;

	spin_lock_bh(&tx_sa->lock);

	pn = tx_sa->next_pn_halves;
	if (secy->xpn)
		tx_sa->next_pn++;
	else
		tx_sa->next_pn_halves.lower++;

	if (tx_sa->next_pn == 0)
		__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}
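
/* Added note: callers get the pre-increment PN; a returned value of 0
 * only happens once the counter has wrapped, which macsec_encrypt() uses
 * (pn.full64 == 0) to refuse to protect any further frames on this SA.
 */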

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static unsigned int macsec_msdu_len(struct sk_buff *skb)
{
	struct macsec_dev *macsec = macsec_priv(skb->dev);
	struct macsec_secy *secy = &macsec->secy;
	bool sci_present = macsec_skb_cb(skb)->has_sci;

	return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	unsigned int msdu_len = macsec_msdu_len(skb);
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += msdu_len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += msdu_len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		u64_stats_inc(&stats->tx_packets);
		u64_stats_add(&stats->tx_bytes, len);
		u64_stats_update_end(&stats->syncp);
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	/* packet is encrypted/protected so tx_bytes must be calculated */
	len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
	macsec_encrypt_finish(skb, dev);
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);

	req = tmp;
	aead_request_set_tfm(req, tfm);

	return req;
}
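
/* Added note: macsec_alloc_req() carves the request, the 12-byte GCM IV
 * and the scatterlist array out of a single GFP_ATOMIC allocation, laid
 * out as | aead_request | IV | sg[num_frags] | with the scatterlist
 * aligned to __alignof__(struct scatterlist).
 */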
604 static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
605 struct net_device *dev)
608 struct scatterlist *sg;
609 struct sk_buff *trailer;
612 struct macsec_eth_header *hh;
613 size_t unprotected_len;
614 struct aead_request *req;
615 struct macsec_secy *secy;
616 struct macsec_tx_sc *tx_sc;
617 struct macsec_tx_sa *tx_sa;
618 struct macsec_dev *macsec = macsec_priv(dev);
622 secy = &macsec->secy;
623 tx_sc = &secy->tx_sc;
625 /* 10.5.1 TX SA assignment */
626 tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
628 secy->operational = false;
630 return ERR_PTR(-EINVAL);
633 if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
634 skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
635 struct sk_buff *nskb = skb_copy_expand(skb,
636 MACSEC_NEEDED_HEADROOM,
637 MACSEC_NEEDED_TAILROOM,
643 macsec_txsa_put(tx_sa);
645 return ERR_PTR(-ENOMEM);
648 skb = skb_unshare(skb, GFP_ATOMIC);
650 macsec_txsa_put(tx_sa);
651 return ERR_PTR(-ENOMEM);
655 unprotected_len = skb->len;
657 sci_present = send_sci(secy);
658 hh = skb_push(skb, macsec_extra_len(sci_present));
659 memmove(hh, eth, 2 * ETH_ALEN);
661 pn = tx_sa_update_pn(tx_sa, secy);
662 if (pn.full64 == 0) {
663 macsec_txsa_put(tx_sa);
665 return ERR_PTR(-ENOLINK);
667 macsec_fill_sectag(hh, secy, pn.lower, sci_present);
668 macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
670 skb_put(skb, secy->icv_len);
672 if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
673 struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
675 u64_stats_update_begin(&secy_stats->syncp);
676 secy_stats->stats.OutPktsTooLong++;
677 u64_stats_update_end(&secy_stats->syncp);
679 macsec_txsa_put(tx_sa);
681 return ERR_PTR(-EINVAL);
684 ret = skb_cow_data(skb, 0, &trailer);
685 if (unlikely(ret < 0)) {
686 macsec_txsa_put(tx_sa);
691 req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
693 macsec_txsa_put(tx_sa);
695 return ERR_PTR(-ENOMEM);
699 macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
701 macsec_fill_iv(iv, secy->sci, pn.lower);
703 sg_init_table(sg, ret);
704 ret = skb_to_sgvec(skb, sg, 0, skb->len);
705 if (unlikely(ret < 0)) {
706 aead_request_free(req);
707 macsec_txsa_put(tx_sa);
712 if (tx_sc->encrypt) {
713 int len = skb->len - macsec_hdr_len(sci_present) -
715 aead_request_set_crypt(req, sg, sg, len, iv);
716 aead_request_set_ad(req, macsec_hdr_len(sci_present));
718 aead_request_set_crypt(req, sg, sg, 0, iv);
719 aead_request_set_ad(req, skb->len - secy->icv_len);
722 macsec_skb_cb(skb)->req = req;
723 macsec_skb_cb(skb)->tx_sa = tx_sa;
724 macsec_skb_cb(skb)->has_sci = sci_present;
725 aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
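
/* Added note: -EINPROGRESS is the asynchronous path; the skb and its
 * references are then owned by the crypto layer until
 * macsec_encrypt_done() runs, which counts the frame and hands it to
 * dev_queue_xmit() on the real device.
 */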
746 static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
748 struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
749 struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
750 struct macsec_eth_header *hdr = macsec_ethhdr(skb);
753 spin_lock(&rx_sa->lock);
754 if (rx_sa->next_pn_halves.lower >= secy->replay_window)
755 lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;
	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn &&
	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
762 spin_unlock(&rx_sa->lock);
763 u64_stats_update_begin(&rxsc_stats->syncp);
764 rxsc_stats->stats.InPktsLate++;
765 u64_stats_update_end(&rxsc_stats->syncp);
766 secy->netdev->stats.rx_dropped++;
770 if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
771 unsigned int msdu_len = macsec_msdu_len(skb);
772 u64_stats_update_begin(&rxsc_stats->syncp);
773 if (hdr->tci_an & MACSEC_TCI_E)
774 rxsc_stats->stats.InOctetsDecrypted += msdu_len;
776 rxsc_stats->stats.InOctetsValidated += msdu_len;
777 u64_stats_update_end(&rxsc_stats->syncp);
780 if (!macsec_skb_cb(skb)->valid) {
781 spin_unlock(&rx_sa->lock);
784 if (hdr->tci_an & MACSEC_TCI_C ||
785 secy->validate_frames == MACSEC_VALIDATE_STRICT) {
786 u64_stats_update_begin(&rxsc_stats->syncp);
787 rxsc_stats->stats.InPktsNotValid++;
788 u64_stats_update_end(&rxsc_stats->syncp);
789 this_cpu_inc(rx_sa->stats->InPktsNotValid);
790 secy->netdev->stats.rx_errors++;
794 u64_stats_update_begin(&rxsc_stats->syncp);
795 if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
796 rxsc_stats->stats.InPktsInvalid++;
797 this_cpu_inc(rx_sa->stats->InPktsInvalid);
798 } else if (pn < lowest_pn) {
799 rxsc_stats->stats.InPktsDelayed++;
801 rxsc_stats->stats.InPktsUnchecked++;
803 u64_stats_update_end(&rxsc_stats->syncp);
805 u64_stats_update_begin(&rxsc_stats->syncp);
806 if (pn < lowest_pn) {
807 rxsc_stats->stats.InPktsDelayed++;
809 rxsc_stats->stats.InPktsOK++;
810 this_cpu_inc(rx_sa->stats->InPktsOK);
812 u64_stats_update_end(&rxsc_stats->syncp);
	/* Instead of "pn >=" - to support pn overflow in xpn */
	if (pn + 1 > rx_sa->next_pn_halves.lower) {
		rx_sa->next_pn_halves.lower = pn + 1;
	} else if (secy->xpn &&
		   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
		rx_sa->next_pn_halves.upper++;
		rx_sa->next_pn_halves.lower = pn + 1;
	}

	spin_unlock(&rx_sa->lock);

	return true;
}
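
/* Worked example (added for illustration): with XPN enabled and
 * next_pn_halves.lower == 0xfffffff0, receiving pn == 2 takes the second
 * branch (different halves), so the upper half is bumped and the lower
 * half restarts at pn + 1 == 3.
 */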
829 static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
831 skb->pkt_type = PACKET_HOST;
832 skb->protocol = eth_type_trans(skb, dev);
834 skb_reset_network_header(skb);
835 if (!skb_transport_header_was_set(skb))
836 skb_reset_transport_header(skb);
837 skb_reset_mac_len(skb);
840 static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
842 skb->ip_summed = CHECKSUM_NONE;
843 memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
844 skb_pull(skb, hdr_len);
845 pskb_trim_unique(skb, skb->len - icv_len);
848 static void count_rx(struct net_device *dev, int len)
850 struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
852 u64_stats_update_begin(&stats->syncp);
853 u64_stats_inc(&stats->rx_packets);
854 u64_stats_add(&stats->rx_bytes, len);
855 u64_stats_update_end(&stats->syncp);
858 static void macsec_decrypt_done(struct crypto_async_request *base, int err)
860 struct sk_buff *skb = base->data;
861 struct net_device *dev = skb->dev;
862 struct macsec_dev *macsec = macsec_priv(dev);
863 struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
864 struct macsec_rx_sc *rx_sc = rx_sa->sc;
868 aead_request_free(macsec_skb_cb(skb)->req);
871 macsec_skb_cb(skb)->valid = true;
874 pn = ntohl(macsec_ethhdr(skb)->packet_number);
875 if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
876 rcu_read_unlock_bh();
881 macsec_finalize_skb(skb, macsec->secy.icv_len,
882 macsec_extra_len(macsec_skb_cb(skb)->has_sci));
884 macsec_reset_skb(skb, macsec->secy.netdev);
886 if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
889 rcu_read_unlock_bh();
892 macsec_rxsa_put(rx_sa);
893 macsec_rxsc_put(rx_sc);
897 static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
898 struct net_device *dev,
899 struct macsec_rx_sa *rx_sa,
901 struct macsec_secy *secy)
904 struct scatterlist *sg;
905 struct sk_buff *trailer;
907 struct aead_request *req;
908 struct macsec_eth_header *hdr;
910 u16 icv_len = secy->icv_len;
912 macsec_skb_cb(skb)->valid = false;
913 skb = skb_share_check(skb, GFP_ATOMIC);
915 return ERR_PTR(-ENOMEM);
917 ret = skb_cow_data(skb, 0, &trailer);
918 if (unlikely(ret < 0)) {
922 req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
925 return ERR_PTR(-ENOMEM);
928 hdr = (struct macsec_eth_header *)skb->data;
929 hdr_pn = ntohl(hdr->packet_number);
932 pn_t recovered_pn = rx_sa->next_pn_halves;
934 recovered_pn.lower = hdr_pn;
935 if (hdr_pn < rx_sa->next_pn_halves.lower &&
936 !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
937 recovered_pn.upper++;
939 macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
942 macsec_fill_iv(iv, sci, hdr_pn);
945 sg_init_table(sg, ret);
946 ret = skb_to_sgvec(skb, sg, 0, skb->len);
947 if (unlikely(ret < 0)) {
948 aead_request_free(req);
953 if (hdr->tci_an & MACSEC_TCI_E) {
954 /* confidentiality: ethernet + macsec header
955 * authenticated, encrypted payload
957 int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);
959 aead_request_set_crypt(req, sg, sg, len, iv);
960 aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
961 skb = skb_unshare(skb, GFP_ATOMIC);
963 aead_request_free(req);
964 return ERR_PTR(-ENOMEM);
967 /* integrity only: all headers + data authenticated */
968 aead_request_set_crypt(req, sg, sg, icv_len, iv);
969 aead_request_set_ad(req, skb->len - icv_len);
	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}
998 static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
1000 struct macsec_rx_sc *rx_sc;
1002 for_each_rxsc(secy, rx_sc) {
1003 if (rx_sc->sci == sci)
1010 static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
1012 struct macsec_rx_sc *rx_sc;
1014 for_each_rxsc_rtnl(secy, rx_sc) {
1015 if (rx_sc->sci == sci)
1022 static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
1024 /* Deliver to the uncontrolled port by default */
1025 enum rx_handler_result ret = RX_HANDLER_PASS;
1026 struct ethhdr *hdr = eth_hdr(skb);
1027 struct macsec_rxh_data *rxd;
1028 struct macsec_dev *macsec;
1031 rxd = macsec_data_rcu(skb->dev);
1033 list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1034 struct sk_buff *nskb;
1035 struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
1036 struct net_device *ndev = macsec->secy.netdev;
1038 /* If h/w offloading is enabled, HW decodes frames and strips
1039 * the SecTAG, so we have to deduce which port to deliver to.
1041 if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
1042 if (ether_addr_equal_64bits(hdr->h_dest,
1044 /* exact match, divert skb to this port */
1046 skb->pkt_type = PACKET_HOST;
1047 ret = RX_HANDLER_ANOTHER;
1049 } else if (is_multicast_ether_addr_64bits(
1051 /* multicast frame, deliver on this port too */
1052 nskb = skb_clone(skb, GFP_ATOMIC);
1057 if (ether_addr_equal_64bits(hdr->h_dest,
1059 nskb->pkt_type = PACKET_BROADCAST;
1061 nskb->pkt_type = PACKET_MULTICAST;
		/* 10.6 If the management control validateFrames is not
		 * Strict, frames without a SecTAG are received, counted, and
		 * delivered to the Controlled Port
		 */
1072 if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1073 u64_stats_update_begin(&secy_stats->syncp);
1074 secy_stats->stats.InPktsNoTag++;
1075 u64_stats_update_end(&secy_stats->syncp);
1076 macsec->secy.netdev->stats.rx_dropped++;
1080 /* deliver on this port */
1081 nskb = skb_clone(skb, GFP_ATOMIC);
1087 if (__netif_rx(nskb) == NET_RX_SUCCESS) {
1088 u64_stats_update_begin(&secy_stats->syncp);
1089 secy_stats->stats.InPktsUntagged++;
1090 u64_stats_update_end(&secy_stats->syncp);
1099 static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
1101 struct sk_buff *skb = *pskb;
1102 struct net_device *dev = skb->dev;
1103 struct macsec_eth_header *hdr;
1104 struct macsec_secy *secy = NULL;
1105 struct macsec_rx_sc *rx_sc;
1106 struct macsec_rx_sa *rx_sa;
1107 struct macsec_rxh_data *rxd;
1108 struct macsec_dev *macsec;
1113 struct pcpu_rx_sc_stats *rxsc_stats;
1114 struct pcpu_secy_stats *secy_stats;
1118 if (skb_headroom(skb) < ETH_HLEN)
1121 hdr = macsec_ethhdr(skb);
1122 if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
1123 return handle_not_macsec(skb);
1125 skb = skb_unshare(skb, GFP_ATOMIC);
1128 return RX_HANDLER_CONSUMED;
1130 pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
1132 if (!pskb_may_pull(skb, macsec_extra_len(false)))
1136 hdr = macsec_ethhdr(skb);
	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;
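
	/* Illustration (added): a SecTAG with tci_an == MACSEC_TCI_E
	 * (0x08) alone matches this reserved encoding, so the frame is
	 * handed back to the stack (the uncontrolled port) rather than
	 * decrypted.
	 */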
1146 /* now, pull the extra length */
1147 if (hdr->tci_an & MACSEC_TCI_SC) {
1152 /* ethernet header is part of crypto processing */
1153 skb_push(skb, ETH_HLEN);
1155 macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
1156 macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
1157 sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
1160 rxd = macsec_data_rcu(skb->dev);
1162 list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1163 struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
1165 sc = sc ? macsec_rxsc_get(sc) : NULL;
1168 secy = &macsec->secy;
1178 macsec = macsec_priv(dev);
1179 secy_stats = this_cpu_ptr(macsec->stats);
1180 rxsc_stats = this_cpu_ptr(rx_sc->stats);
1182 if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
1183 u64_stats_update_begin(&secy_stats->syncp);
1184 secy_stats->stats.InPktsBadTag++;
1185 u64_stats_update_end(&secy_stats->syncp);
1186 secy->netdev->stats.rx_errors++;
1190 rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
1192 /* 10.6.1 if the SA is not in use */
1194 /* If validateFrames is Strict or the C bit in the
1195 * SecTAG is set, discard
1197 struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
1198 if (hdr->tci_an & MACSEC_TCI_C ||
1199 secy->validate_frames == MACSEC_VALIDATE_STRICT) {
1200 u64_stats_update_begin(&rxsc_stats->syncp);
1201 rxsc_stats->stats.InPktsNotUsingSA++;
1202 u64_stats_update_end(&rxsc_stats->syncp);
1203 secy->netdev->stats.rx_errors++;
1205 this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
1209 /* not Strict, the frame (with the SecTAG and ICV
1210 * removed) is delivered to the Controlled Port.
1212 u64_stats_update_begin(&rxsc_stats->syncp);
1213 rxsc_stats->stats.InPktsUnusedSA++;
1214 u64_stats_update_end(&rxsc_stats->syncp);
1216 this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
1220 /* First, PN check to avoid decrypting obviously wrong packets */
1221 hdr_pn = ntohl(hdr->packet_number);
1222 if (secy->replay_protect) {
1225 spin_lock(&rx_sa->lock);
1226 late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
1227 hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);
1230 late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
1231 spin_unlock(&rx_sa->lock);
1234 u64_stats_update_begin(&rxsc_stats->syncp);
1235 rxsc_stats->stats.InPktsLate++;
1236 u64_stats_update_end(&rxsc_stats->syncp);
1237 macsec->secy.netdev->stats.rx_dropped++;
1242 macsec_skb_cb(skb)->rx_sa = rx_sa;
1244 /* Disabled && !changed text => skip validation */
1245 if (hdr->tci_an & MACSEC_TCI_C ||
1246 secy->validate_frames != MACSEC_VALIDATE_DISABLED)
1247 skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
1250 /* the decrypt callback needs the reference */
1251 if (PTR_ERR(skb) != -EINPROGRESS) {
1252 macsec_rxsa_put(rx_sa);
1253 macsec_rxsc_put(rx_sc);
1257 return RX_HANDLER_CONSUMED;
1260 if (!macsec_post_decrypt(skb, secy, hdr_pn))
1264 macsec_finalize_skb(skb, secy->icv_len,
1265 macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1267 macsec_reset_skb(skb, secy->netdev);
1270 macsec_rxsa_put(rx_sa);
1271 macsec_rxsc_put(rx_sc);
1274 ret = gro_cells_receive(&macsec->gro_cells, skb);
1275 if (ret == NET_RX_SUCCESS)
1278 macsec->secy.netdev->stats.rx_dropped++;
1283 return RX_HANDLER_CONSUMED;
1286 macsec_rxsa_put(rx_sa);
1288 macsec_rxsc_put(rx_sc);
1293 return RX_HANDLER_CONSUMED;
1296 /* 10.6.1 if the SC is not found */
1297 cbit = !!(hdr->tci_an & MACSEC_TCI_C);
1299 macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
1300 macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1302 list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1303 struct sk_buff *nskb;
1305 secy_stats = this_cpu_ptr(macsec->stats);
1307 /* If validateFrames is Strict or the C bit in the
1308 * SecTAG is set, discard
1311 macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1312 u64_stats_update_begin(&secy_stats->syncp);
1313 secy_stats->stats.InPktsNoSCI++;
1314 u64_stats_update_end(&secy_stats->syncp);
1315 macsec->secy.netdev->stats.rx_errors++;
1319 /* not strict, the frame (with the SecTAG and ICV
1320 * removed) is delivered to the Controlled Port.
1322 nskb = skb_clone(skb, GFP_ATOMIC);
1326 macsec_reset_skb(nskb, macsec->secy.netdev);
1328 ret = __netif_rx(nskb);
1329 if (ret == NET_RX_SUCCESS) {
1330 u64_stats_update_begin(&secy_stats->syncp);
1331 secy_stats->stats.InPktsUnknownSCI++;
1332 u64_stats_update_end(&secy_stats->syncp);
1334 macsec->secy.netdev->stats.rx_dropped++;
1340 return RX_HANDLER_PASS;
static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	/* Pick a sync gcm(aes) cipher to ensure order is preserved. */
	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}
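
/* Added note: passing CRYPTO_ALG_ASYNC as the mask with a type of 0 asks
 * the crypto API for a gcm(aes) implementation whose ASYNC flag is clear,
 * i.e. a synchronous one, so frames cannot be reordered inside the cipher.
 */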
1368 static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
1371 rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
1375 rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
1376 if (IS_ERR(rx_sa->key.tfm)) {
1377 free_percpu(rx_sa->stats);
1378 return PTR_ERR(rx_sa->key.tfm);
1381 rx_sa->ssci = MACSEC_UNDEF_SSCI;
1382 rx_sa->active = false;
1384 refcount_set(&rx_sa->refcnt, 1);
1385 spin_lock_init(&rx_sa->lock);
1390 static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
1392 rx_sa->active = false;
1394 macsec_rxsa_put(rx_sa);
1397 static void free_rx_sc(struct macsec_rx_sc *rx_sc)
1401 for (i = 0; i < MACSEC_NUM_AN; i++) {
1402 struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);
1404 RCU_INIT_POINTER(rx_sc->sa[i], NULL);
1409 macsec_rxsc_put(rx_sc);
1412 static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
1414 struct macsec_rx_sc *rx_sc, __rcu **rx_scp;
1416 for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
1418 rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
1419 if (rx_sc->sci == sci) {
1422 rcu_assign_pointer(*rx_scp, rx_sc->next);
1430 static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
1432 struct macsec_rx_sc *rx_sc;
1433 struct macsec_dev *macsec;
1434 struct net_device *real_dev = macsec_priv(dev)->real_dev;
1435 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
1436 struct macsec_secy *secy;
1438 list_for_each_entry(macsec, &rxd->secys, secys) {
1439 if (find_rx_sc_rtnl(&macsec->secy, sci))
1440 return ERR_PTR(-EEXIST);
1443 rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
1445 return ERR_PTR(-ENOMEM);
1447 rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
1448 if (!rx_sc->stats) {
1450 return ERR_PTR(-ENOMEM);
1454 rx_sc->active = true;
1455 refcount_set(&rx_sc->refcnt, 1);
1457 secy = &macsec_priv(dev)->secy;
1458 rcu_assign_pointer(rx_sc->next, secy->rx_sc);
1459 rcu_assign_pointer(secy->rx_sc, rx_sc);
1467 static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
1470 tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
1474 tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
1475 if (IS_ERR(tx_sa->key.tfm)) {
1476 free_percpu(tx_sa->stats);
1477 return PTR_ERR(tx_sa->key.tfm);
1480 tx_sa->ssci = MACSEC_UNDEF_SSCI;
1481 tx_sa->active = false;
1482 refcount_set(&tx_sa->refcnt, 1);
1483 spin_lock_init(&tx_sa->lock);
1488 static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
1490 tx_sa->active = false;
1492 macsec_txsa_put(tx_sa);
1495 static struct genl_family macsec_fam;
1497 static struct net_device *get_dev_from_nl(struct net *net,
1498 struct nlattr **attrs)
1500 int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
1501 struct net_device *dev;
1503 dev = __dev_get_by_index(net, ifindex);
1505 return ERR_PTR(-ENODEV);
1507 if (!netif_is_macsec(dev))
1508 return ERR_PTR(-ENODEV);
1513 static enum macsec_offload nla_get_offload(const struct nlattr *nla)
1515 return (__force enum macsec_offload)nla_get_u8(nla);
1518 static sci_t nla_get_sci(const struct nlattr *nla)
1520 return (__force sci_t)nla_get_u64(nla);
1523 static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
1526 return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
1529 static ssci_t nla_get_ssci(const struct nlattr *nla)
1531 return (__force ssci_t)nla_get_u32(nla);
1534 static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
1536 return nla_put_u32(skb, attrtype, (__force u64)value);
1539 static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
1540 struct nlattr **attrs,
1541 struct nlattr **tb_sa,
1542 struct net_device **devp,
1543 struct macsec_secy **secyp,
1544 struct macsec_tx_sc **scp,
1547 struct net_device *dev;
1548 struct macsec_secy *secy;
1549 struct macsec_tx_sc *tx_sc;
1550 struct macsec_tx_sa *tx_sa;
1552 if (!tb_sa[MACSEC_SA_ATTR_AN])
1553 return ERR_PTR(-EINVAL);
1555 *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1557 dev = get_dev_from_nl(net, attrs);
1559 return ERR_CAST(dev);
1561 if (*assoc_num >= MACSEC_NUM_AN)
1562 return ERR_PTR(-EINVAL);
1564 secy = &macsec_priv(dev)->secy;
1565 tx_sc = &secy->tx_sc;
1567 tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
1569 return ERR_PTR(-ENODEV);
1577 static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
1578 struct nlattr **attrs,
1579 struct nlattr **tb_rxsc,
1580 struct net_device **devp,
1581 struct macsec_secy **secyp)
1583 struct net_device *dev;
1584 struct macsec_secy *secy;
1585 struct macsec_rx_sc *rx_sc;
1588 dev = get_dev_from_nl(net, attrs);
1590 return ERR_CAST(dev);
1592 secy = &macsec_priv(dev)->secy;
1594 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
1595 return ERR_PTR(-EINVAL);
1597 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1598 rx_sc = find_rx_sc_rtnl(secy, sci);
1600 return ERR_PTR(-ENODEV);
1608 static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
1609 struct nlattr **attrs,
1610 struct nlattr **tb_rxsc,
1611 struct nlattr **tb_sa,
1612 struct net_device **devp,
1613 struct macsec_secy **secyp,
1614 struct macsec_rx_sc **scp,
1617 struct macsec_rx_sc *rx_sc;
1618 struct macsec_rx_sa *rx_sa;
1620 if (!tb_sa[MACSEC_SA_ATTR_AN])
1621 return ERR_PTR(-EINVAL);
1623 *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1624 if (*assoc_num >= MACSEC_NUM_AN)
1625 return ERR_PTR(-EINVAL);
1627 rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
1629 return ERR_CAST(rx_sc);
1631 rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
1633 return ERR_PTR(-ENODEV);
1639 static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
1640 [MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
1641 [MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
1642 [MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
1643 [MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
1646 static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
1647 [MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
1648 [MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
1651 static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
1652 [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
1653 [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
1654 [MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
1655 [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
1656 .len = MACSEC_KEYID_LEN, },
1657 [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
1658 .len = MACSEC_MAX_KEY_LEN, },
1659 [MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
1660 [MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
1661 .len = MACSEC_SALT_LEN, },
1664 static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
1665 [MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	/* Phase I: prepare. The driver should fail here if there are going to
	 * be issues in the commit phase.
	 */
	ctx->prepare = true;
	ret = (*func)(ctx);
	if (ret)
		goto phy_unlock;

	/* Phase II: commit. This step cannot fail. */
	ctx->prepare = false;
	ret = (*func)(ctx);
	/* This should never happen: commit is not allowed to fail */
	if (unlikely(ret))
		WARN(1, "MACsec offloading commit failed (%d)\n", ret);

phy_unlock:
	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_unlock(&ctx->phydev->lock);

	return ret;
}
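
/* Added note: every mdo_*() callback is therefore invoked twice, first
 * with ctx->prepare == true, where the driver may veto the operation, and
 * then with ctx->prepare == false, where failure is a bug (see the WARN
 * above).
 */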
1702 static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
1704 if (!attrs[MACSEC_ATTR_SA_CONFIG])
1707 if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
1713 static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
1715 if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
1718 if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
1724 static bool validate_add_rxsa(struct nlattr **attrs)
1726 if (!attrs[MACSEC_SA_ATTR_AN] ||
1727 !attrs[MACSEC_SA_ATTR_KEY] ||
1728 !attrs[MACSEC_SA_ATTR_KEYID])
1731 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1734 if (attrs[MACSEC_SA_ATTR_PN] &&
1735 nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
1738 if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1739 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1743 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1749 static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
1751 struct net_device *dev;
1752 struct nlattr **attrs = info->attrs;
1753 struct macsec_secy *secy;
1754 struct macsec_rx_sc *rx_sc;
1755 struct macsec_rx_sa *rx_sa;
1756 unsigned char assoc_num;
1758 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1759 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1762 if (!attrs[MACSEC_ATTR_IFINDEX])
1765 if (parse_sa_config(attrs, tb_sa))
1768 if (parse_rxsc_config(attrs, tb_rxsc))
1771 if (!validate_add_rxsa(tb_sa))
1775 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
1776 if (IS_ERR(rx_sc)) {
1778 return PTR_ERR(rx_sc);
1781 assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1783 if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
1784 pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
1785 nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
1790 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
1791 if (tb_sa[MACSEC_SA_ATTR_PN] &&
1792 nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
1793 pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
1794 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
1800 if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
1805 if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
1806 pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
1807 nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
1814 rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
1820 rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
1826 err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1827 secy->key_len, secy->icv_len);
1834 if (tb_sa[MACSEC_SA_ATTR_PN]) {
1835 spin_lock_bh(&rx_sa->lock);
1836 rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
1837 spin_unlock_bh(&rx_sa->lock);
1840 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
1841 rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
1845 /* If h/w offloading is available, propagate to the device */
1846 if (macsec_is_offloaded(netdev_priv(dev))) {
1847 const struct macsec_ops *ops;
1848 struct macsec_context ctx;
1850 ops = macsec_get_ops(netdev_priv(dev), &ctx);
1856 ctx.sa.assoc_num = assoc_num;
1857 ctx.sa.rx_sa = rx_sa;
1859 memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1862 err = macsec_offload(ops->mdo_add_rxsa, &ctx);
1868 rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
1869 nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
1873 nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
1874 rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
1881 macsec_rxsa_put(rx_sa);
1886 static bool validate_add_rxsc(struct nlattr **attrs)
1888 if (!attrs[MACSEC_RXSC_ATTR_SCI])
1891 if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
1892 if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
1899 static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
1901 struct net_device *dev;
1902 sci_t sci = MACSEC_UNDEF_SCI;
1903 struct nlattr **attrs = info->attrs;
1904 struct macsec_rx_sc *rx_sc;
1905 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1906 struct macsec_secy *secy;
1910 if (!attrs[MACSEC_ATTR_IFINDEX])
1913 if (parse_rxsc_config(attrs, tb_rxsc))
1916 if (!validate_add_rxsc(tb_rxsc))
1920 dev = get_dev_from_nl(genl_info_net(info), attrs);
1923 return PTR_ERR(dev);
1926 secy = &macsec_priv(dev)->secy;
1927 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1929 rx_sc = create_rx_sc(dev, sci);
1930 if (IS_ERR(rx_sc)) {
1932 return PTR_ERR(rx_sc);
1935 was_active = rx_sc->active;
1936 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
1937 rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
1939 if (macsec_is_offloaded(netdev_priv(dev))) {
1940 const struct macsec_ops *ops;
1941 struct macsec_context ctx;
1943 ops = macsec_get_ops(netdev_priv(dev), &ctx);
1952 ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
1962 rx_sc->active = was_active;
1967 static bool validate_add_txsa(struct nlattr **attrs)
1969 if (!attrs[MACSEC_SA_ATTR_AN] ||
1970 !attrs[MACSEC_SA_ATTR_PN] ||
1971 !attrs[MACSEC_SA_ATTR_KEY] ||
1972 !attrs[MACSEC_SA_ATTR_KEYID])
1975 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1978 if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
1981 if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1982 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1986 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1992 static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
1994 struct net_device *dev;
1995 struct nlattr **attrs = info->attrs;
1996 struct macsec_secy *secy;
1997 struct macsec_tx_sc *tx_sc;
1998 struct macsec_tx_sa *tx_sa;
1999 unsigned char assoc_num;
2001 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2002 bool was_operational;
2005 if (!attrs[MACSEC_ATTR_IFINDEX])
2008 if (parse_sa_config(attrs, tb_sa))
2011 if (!validate_add_txsa(tb_sa))
2015 dev = get_dev_from_nl(genl_info_net(info), attrs);
2018 return PTR_ERR(dev);
2021 secy = &macsec_priv(dev)->secy;
2022 tx_sc = &secy->tx_sc;
2024 assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
2026 if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
2027 pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
2028 nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
2033 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2034 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2035 pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
2036 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2042 if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
2047 if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
2048 pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
2049 nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
2056 tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
2062 tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
2068 err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2069 secy->key_len, secy->icv_len);
2076 spin_lock_bh(&tx_sa->lock);
2077 tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2078 spin_unlock_bh(&tx_sa->lock);
2080 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2081 tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2083 was_operational = secy->operational;
2084 if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
2085 secy->operational = true;
2087 /* If h/w offloading is available, propagate to the device */
2088 if (macsec_is_offloaded(netdev_priv(dev))) {
2089 const struct macsec_ops *ops;
2090 struct macsec_context ctx;
2092 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2098 ctx.sa.assoc_num = assoc_num;
2099 ctx.sa.tx_sa = tx_sa;
2101 memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2104 err = macsec_offload(ops->mdo_add_txsa, &ctx);
2110 tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
2111 nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
2115 nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
2116 rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
2123 secy->operational = was_operational;
2124 macsec_txsa_put(tx_sa);
2129 static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
2131 struct nlattr **attrs = info->attrs;
2132 struct net_device *dev;
2133 struct macsec_secy *secy;
2134 struct macsec_rx_sc *rx_sc;
2135 struct macsec_rx_sa *rx_sa;
2137 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2138 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2141 if (!attrs[MACSEC_ATTR_IFINDEX])
2144 if (parse_sa_config(attrs, tb_sa))
2147 if (parse_rxsc_config(attrs, tb_rxsc))
2151 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2152 &dev, &secy, &rx_sc, &assoc_num);
2153 if (IS_ERR(rx_sa)) {
2155 return PTR_ERR(rx_sa);
2158 if (rx_sa->active) {
2163 /* If h/w offloading is available, propagate to the device */
2164 if (macsec_is_offloaded(netdev_priv(dev))) {
2165 const struct macsec_ops *ops;
2166 struct macsec_context ctx;
2168 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2174 ctx.sa.assoc_num = assoc_num;
2175 ctx.sa.rx_sa = rx_sa;
2178 ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
2183 RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
2195 static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
2197 struct nlattr **attrs = info->attrs;
2198 struct net_device *dev;
2199 struct macsec_secy *secy;
2200 struct macsec_rx_sc *rx_sc;
2202 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2205 if (!attrs[MACSEC_ATTR_IFINDEX])
2208 if (parse_rxsc_config(attrs, tb_rxsc))
2211 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
2215 dev = get_dev_from_nl(genl_info_net(info), info->attrs);
2218 return PTR_ERR(dev);
2221 secy = &macsec_priv(dev)->secy;
2222 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
2224 rx_sc = del_rx_sc(secy, sci);
2230 /* If h/w offloading is available, propagate to the device */
2231 if (macsec_is_offloaded(netdev_priv(dev))) {
2232 const struct macsec_ops *ops;
2233 struct macsec_context ctx;
2235 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2243 ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
2258 static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
2260 struct nlattr **attrs = info->attrs;
2261 struct net_device *dev;
2262 struct macsec_secy *secy;
2263 struct macsec_tx_sc *tx_sc;
2264 struct macsec_tx_sa *tx_sa;
2266 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2269 if (!attrs[MACSEC_ATTR_IFINDEX])
2272 if (parse_sa_config(attrs, tb_sa))
2276 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2277 &dev, &secy, &tx_sc, &assoc_num);
2278 if (IS_ERR(tx_sa)) {
2280 return PTR_ERR(tx_sa);
2283 if (tx_sa->active) {
2288 /* If h/w offloading is available, propagate to the device */
2289 if (macsec_is_offloaded(netdev_priv(dev))) {
2290 const struct macsec_ops *ops;
2291 struct macsec_context ctx;
2293 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2299 ctx.sa.assoc_num = assoc_num;
2300 ctx.sa.tx_sa = tx_sa;
2303 ret = macsec_offload(ops->mdo_del_txsa, &ctx);
2308 RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
2320 static bool validate_upd_sa(struct nlattr **attrs)
2322 if (!attrs[MACSEC_SA_ATTR_AN] ||
2323 attrs[MACSEC_SA_ATTR_KEY] ||
2324 attrs[MACSEC_SA_ATTR_KEYID] ||
2325 attrs[MACSEC_SA_ATTR_SSCI] ||
2326 attrs[MACSEC_SA_ATTR_SALT])
2329 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
2332 if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
2335 if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
2336 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
2343 static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
2345 struct nlattr **attrs = info->attrs;
2346 struct net_device *dev;
2347 struct macsec_secy *secy;
2348 struct macsec_tx_sc *tx_sc;
2349 struct macsec_tx_sa *tx_sa;
2351 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2352 bool was_operational, was_active;
2358 if (!attrs[MACSEC_ATTR_IFINDEX])
2361 if (parse_sa_config(attrs, tb_sa))
2364 if (!validate_upd_sa(tb_sa))
2368 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2369 &dev, &secy, &tx_sc, &assoc_num);
2370 if (IS_ERR(tx_sa)) {
2372 return PTR_ERR(tx_sa);
2375 if (tb_sa[MACSEC_SA_ATTR_PN]) {
2378 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2379 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2380 pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
2381 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2386 spin_lock_bh(&tx_sa->lock);
2387 prev_pn = tx_sa->next_pn_halves;
2388 tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2389 spin_unlock_bh(&tx_sa->lock);
2392 was_active = tx_sa->active;
2393 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2394 tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2396 was_operational = secy->operational;
2397 if (assoc_num == tx_sc->encoding_sa)
2398 secy->operational = tx_sa->active;
2400 /* If h/w offloading is available, propagate to the device */
2401 if (macsec_is_offloaded(netdev_priv(dev))) {
2402 const struct macsec_ops *ops;
2403 struct macsec_context ctx;
2405 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2411 ctx.sa.assoc_num = assoc_num;
2412 ctx.sa.tx_sa = tx_sa;
2415 ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
2425 if (tb_sa[MACSEC_SA_ATTR_PN]) {
2426 spin_lock_bh(&tx_sa->lock);
2427 tx_sa->next_pn_halves = prev_pn;
2428 spin_unlock_bh(&tx_sa->lock);
2430 tx_sa->active = was_active;
2431 secy->operational = was_operational;
2436 static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
2438 struct nlattr **attrs = info->attrs;
2439 struct net_device *dev;
2440 struct macsec_secy *secy;
2441 struct macsec_rx_sc *rx_sc;
2442 struct macsec_rx_sa *rx_sa;
2444 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2445 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2452 if (!attrs[MACSEC_ATTR_IFINDEX])
2455 if (parse_rxsc_config(attrs, tb_rxsc))
2458 if (parse_sa_config(attrs, tb_sa))
2461 if (!validate_upd_sa(tb_sa))
2465 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2466 &dev, &secy, &rx_sc, &assoc_num);
2467 if (IS_ERR(rx_sa)) {
2469 return PTR_ERR(rx_sa);
2472 if (tb_sa[MACSEC_SA_ATTR_PN]) {
2475 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2476 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2477 pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
2478 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2483 spin_lock_bh(&rx_sa->lock);
2484 prev_pn = rx_sa->next_pn_halves;
2485 rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2486 spin_unlock_bh(&rx_sa->lock);
2489 was_active = rx_sa->active;
2490 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2491 rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2493 /* If h/w offloading is available, propagate to the device */
2494 if (macsec_is_offloaded(netdev_priv(dev))) {
2495 const struct macsec_ops *ops;
2496 struct macsec_context ctx;
2498 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2504 ctx.sa.assoc_num = assoc_num;
2505 ctx.sa.rx_sa = rx_sa;
2508 ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
2517 if (tb_sa[MACSEC_SA_ATTR_PN]) {
2518 spin_lock_bh(&rx_sa->lock);
2519 rx_sa->next_pn_halves = prev_pn;
2520 spin_unlock_bh(&rx_sa->lock);
2522 rx_sa->active = was_active;
2527 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
2529 struct nlattr **attrs = info->attrs;
2530 struct net_device *dev;
2531 struct macsec_secy *secy;
2532 struct macsec_rx_sc *rx_sc;
2533 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2534 unsigned int prev_n_rx_sc;
2538 if (!attrs[MACSEC_ATTR_IFINDEX])
2541 if (parse_rxsc_config(attrs, tb_rxsc))
2544 if (!validate_add_rxsc(tb_rxsc))
2548 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
2549 if (IS_ERR(rx_sc)) {
2551 return PTR_ERR(rx_sc);
2554 was_active = rx_sc->active;
2555 prev_n_rx_sc = secy->n_rx_sc;
2556 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
2557 bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
2559 if (rx_sc->active != new)
2560 secy->n_rx_sc += new ? 1 : -1;
2562 rx_sc->active = new;
2565 /* If h/w offloading is available, propagate to the device */
2566 if (macsec_is_offloaded(netdev_priv(dev))) {
2567 const struct macsec_ops *ops;
2568 struct macsec_context ctx;
2570 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2579 ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
2589 secy->n_rx_sc = prev_n_rx_sc;
2590 rx_sc->active = was_active;
2595 static bool macsec_is_configured(struct macsec_dev *macsec)
2597 struct macsec_secy *secy = &macsec->secy;
2598 struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2601 if (secy->n_rx_sc > 0)
2604 for (i = 0; i < MACSEC_NUM_AN; i++)
2611 static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
2613 struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
2614 enum macsec_offload offload, prev_offload;
2615 int (*func)(struct macsec_context *ctx);
2616 struct nlattr **attrs = info->attrs;
2617 struct net_device *dev;
2618 const struct macsec_ops *ops;
2619 struct macsec_context ctx;
2620 struct macsec_dev *macsec;
2623 if (!attrs[MACSEC_ATTR_IFINDEX])
2626 if (!attrs[MACSEC_ATTR_OFFLOAD])
2629 if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
2630 attrs[MACSEC_ATTR_OFFLOAD],
2631 macsec_genl_offload_policy, NULL))
2634 dev = get_dev_from_nl(genl_info_net(info), attrs);
2636 return PTR_ERR(dev);
2637 macsec = macsec_priv(dev);
2639 if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE])
2642 offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
2643 if (macsec->offload == offload)
2646 /* Check if the offloading mode is supported by the underlying layers */
2647 if (offload != MACSEC_OFFLOAD_OFF &&
2648 !macsec_check_offload(offload, macsec))
2651 /* Check if the net device is busy. */
2652 if (netif_running(dev))
2657 prev_offload = macsec->offload;
2658 macsec->offload = offload;
	/* Check if the device already has rules configured: we do not support
	 * rules migration.
	 */
2663 if (macsec_is_configured(macsec)) {
2668 ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
2675 if (prev_offload == MACSEC_OFFLOAD_OFF)
2676 func = ops->mdo_add_secy;
2678 func = ops->mdo_del_secy;
2680 ctx.secy = &macsec->secy;
2681 ret = macsec_offload(func, &ctx);
2685 /* Force features update, since they are different for SW MACSec and
2686 * HW offloading cases.
2688 netdev_update_features(dev);
2694 macsec->offload = prev_offload;
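/* Statistics helpers: each get_*_stats() either queries the offloading
 * driver through the corresponding mdo_get_*_stats op, or sums the per-CPU
 * counters maintained by the software datapath; the u64_stats seqcount
 * retry loops make the 64-bit reads consistent on 32-bit systems.
 */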
2700 static void get_tx_sa_stats(struct net_device *dev, int an,
2701			     struct macsec_tx_sa *tx_sa,
2702			     struct macsec_tx_sa_stats *sum)
2703 {
2704	struct macsec_dev *macsec = macsec_priv(dev);
2705	int cpu;
2707	/* If h/w offloading is available, propagate to the device */
2708	if (macsec_is_offloaded(macsec)) {
2709		const struct macsec_ops *ops;
2710		struct macsec_context ctx;
2712		ops = macsec_get_ops(macsec, &ctx);
2713		if (ops) {
2714			ctx.sa.assoc_num = an;
2715			ctx.sa.tx_sa = tx_sa;
2716			ctx.stats.tx_sa_stats = sum;
2717			ctx.secy = &macsec_priv(dev)->secy;
2718			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
2719		}
2720		return;
2721	}
2723	for_each_possible_cpu(cpu) {
2724		const struct macsec_tx_sa_stats *stats =
2725			per_cpu_ptr(tx_sa->stats, cpu);
2727		sum->OutPktsProtected += stats->OutPktsProtected;
2728		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
2729	}
2730 }
2732 static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
2733 {
2734	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
2735			sum->OutPktsProtected) ||
2736	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2737			sum->OutPktsEncrypted))
2738		return -EMSGSIZE;
2740	return 0;
2741 }
2743 static void get_rx_sa_stats(struct net_device *dev,
2744			     struct macsec_rx_sc *rx_sc, int an,
2745			     struct macsec_rx_sa *rx_sa,
2746			     struct macsec_rx_sa_stats *sum)
2747 {
2748	struct macsec_dev *macsec = macsec_priv(dev);
2749	int cpu;
2751	/* If h/w offloading is available, propagate to the device */
2752	if (macsec_is_offloaded(macsec)) {
2753		const struct macsec_ops *ops;
2754		struct macsec_context ctx;
2756		ops = macsec_get_ops(macsec, &ctx);
2757		if (ops) {
2758			ctx.sa.assoc_num = an;
2759			ctx.sa.rx_sa = rx_sa;
2760			ctx.stats.rx_sa_stats = sum;
2761			ctx.secy = &macsec_priv(dev)->secy;
2762			ctx.rx_sc = rx_sc;
2763			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
2764		}
2765		return;
2766	}
2768	for_each_possible_cpu(cpu) {
2769		const struct macsec_rx_sa_stats *stats =
2770			per_cpu_ptr(rx_sa->stats, cpu);
2772		sum->InPktsOK += stats->InPktsOK;
2773		sum->InPktsInvalid += stats->InPktsInvalid;
2774		sum->InPktsNotValid += stats->InPktsNotValid;
2775		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
2776		sum->InPktsUnusedSA += stats->InPktsUnusedSA;
2777	}
2778 }
2780 static int copy_rx_sa_stats(struct sk_buff *skb,
2781			     struct macsec_rx_sa_stats *sum)
2782 {
2783	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
2784	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
2785			sum->InPktsInvalid) ||
2786	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
2787			sum->InPktsNotValid) ||
2788	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2789			sum->InPktsNotUsingSA) ||
2790	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
2791			sum->InPktsUnusedSA))
2792		return -EMSGSIZE;
2794	return 0;
2795 }
2797 static void get_rx_sc_stats(struct net_device *dev,
2798			     struct macsec_rx_sc *rx_sc,
2799			     struct macsec_rx_sc_stats *sum)
2800 {
2801	struct macsec_dev *macsec = macsec_priv(dev);
2802	int cpu;
2804	/* If h/w offloading is available, propagate to the device */
2805	if (macsec_is_offloaded(macsec)) {
2806		const struct macsec_ops *ops;
2807		struct macsec_context ctx;
2809		ops = macsec_get_ops(macsec, &ctx);
2810		if (ops) {
2811			ctx.stats.rx_sc_stats = sum;
2812			ctx.secy = &macsec_priv(dev)->secy;
2813			ctx.rx_sc = rx_sc;
2814			macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
2815		}
2816		return;
2817	}
2819	for_each_possible_cpu(cpu) {
2820		const struct pcpu_rx_sc_stats *stats;
2821		struct macsec_rx_sc_stats tmp;
2822		unsigned int start;
2824		stats = per_cpu_ptr(rx_sc->stats, cpu);
2825		do {
2826			start = u64_stats_fetch_begin_irq(&stats->syncp);
2827			memcpy(&tmp, &stats->stats, sizeof(tmp));
2828		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2830		sum->InOctetsValidated += tmp.InOctetsValidated;
2831		sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
2832		sum->InPktsUnchecked += tmp.InPktsUnchecked;
2833		sum->InPktsDelayed += tmp.InPktsDelayed;
2834		sum->InPktsOK += tmp.InPktsOK;
2835		sum->InPktsInvalid += tmp.InPktsInvalid;
2836		sum->InPktsLate += tmp.InPktsLate;
2837		sum->InPktsNotValid += tmp.InPktsNotValid;
2838		sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA;
2839		sum->InPktsUnusedSA += tmp.InPktsUnusedSA;
2840	}
2841 }
2843 static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
2844 {
2845	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
2846			      sum->InOctetsValidated,
2847			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2848	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
2849			      sum->InOctetsDecrypted,
2850			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2851	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
2852			      sum->InPktsUnchecked,
2853			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2854	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
2855			      sum->InPktsDelayed,
2856			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2857	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
2858			      sum->InPktsOK,
2859			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2860	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
2861			      sum->InPktsInvalid,
2862			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2863	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
2864			      sum->InPktsLate,
2865			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2866	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
2867			      sum->InPktsNotValid,
2868			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2869	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2870			      sum->InPktsNotUsingSA,
2871			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2872	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
2873			      sum->InPktsUnusedSA,
2874			      MACSEC_RXSC_STATS_ATTR_PAD))
2875		return -EMSGSIZE;
2877	return 0;
2878 }
2880 static void get_tx_sc_stats(struct net_device *dev,
2881			     struct macsec_tx_sc_stats *sum)
2882 {
2883	struct macsec_dev *macsec = macsec_priv(dev);
2884	int cpu;
2886	/* If h/w offloading is available, propagate to the device */
2887	if (macsec_is_offloaded(macsec)) {
2888		const struct macsec_ops *ops;
2889		struct macsec_context ctx;
2891		ops = macsec_get_ops(macsec, &ctx);
2892		if (ops) {
2893			ctx.stats.tx_sc_stats = sum;
2894			ctx.secy = &macsec_priv(dev)->secy;
2895			macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
2896		}
2897		return;
2898	}
2900	for_each_possible_cpu(cpu) {
2901		const struct pcpu_tx_sc_stats *stats;
2902		struct macsec_tx_sc_stats tmp;
2903		unsigned int start;
2905		stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
2906		do {
2907			start = u64_stats_fetch_begin_irq(&stats->syncp);
2908			memcpy(&tmp, &stats->stats, sizeof(tmp));
2909		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2911		sum->OutPktsProtected += tmp.OutPktsProtected;
2912		sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
2913		sum->OutOctetsProtected += tmp.OutOctetsProtected;
2914		sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
2915	}
2916 }
2918 static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
2919 {
2920	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
2921			      sum->OutPktsProtected,
2922			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2923	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2924			      sum->OutPktsEncrypted,
2925			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2926	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
2927			      sum->OutOctetsProtected,
2928			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2929	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
2930			      sum->OutOctetsEncrypted,
2931			      MACSEC_TXSC_STATS_ATTR_PAD))
2932		return -EMSGSIZE;
2934	return 0;
2935 }
2937 static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
2938 {
2939	struct macsec_dev *macsec = macsec_priv(dev);
2940	int cpu;
2942	/* If h/w offloading is available, propagate to the device */
2943	if (macsec_is_offloaded(macsec)) {
2944		const struct macsec_ops *ops;
2945		struct macsec_context ctx;
2947		ops = macsec_get_ops(macsec, &ctx);
2948		if (ops) {
2949			ctx.stats.dev_stats = sum;
2950			ctx.secy = &macsec_priv(dev)->secy;
2951			macsec_offload(ops->mdo_get_dev_stats, &ctx);
2952		}
2953		return;
2954	}
2956	for_each_possible_cpu(cpu) {
2957		const struct pcpu_secy_stats *stats;
2958		struct macsec_dev_stats tmp;
2959		unsigned int start;
2961		stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
2962		do {
2963			start = u64_stats_fetch_begin_irq(&stats->syncp);
2964			memcpy(&tmp, &stats->stats, sizeof(tmp));
2965		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2967		sum->OutPktsUntagged += tmp.OutPktsUntagged;
2968		sum->InPktsUntagged += tmp.InPktsUntagged;
2969		sum->OutPktsTooLong += tmp.OutPktsTooLong;
2970		sum->InPktsNoTag += tmp.InPktsNoTag;
2971		sum->InPktsBadTag += tmp.InPktsBadTag;
2972		sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
2973		sum->InPktsNoSCI += tmp.InPktsNoSCI;
2974		sum->InPktsOverrun += tmp.InPktsOverrun;
2975	}
2976 }
2978 static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
2979 {
2980	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
2981			      sum->OutPktsUntagged,
2982			      MACSEC_SECY_STATS_ATTR_PAD) ||
2983	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
2984			      sum->InPktsUntagged,
2985			      MACSEC_SECY_STATS_ATTR_PAD) ||
2986	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
2987			      sum->OutPktsTooLong,
2988			      MACSEC_SECY_STATS_ATTR_PAD) ||
2989	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
2990			      sum->InPktsNoTag,
2991			      MACSEC_SECY_STATS_ATTR_PAD) ||
2992	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
2993			      sum->InPktsBadTag,
2994			      MACSEC_SECY_STATS_ATTR_PAD) ||
2995	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
2996			      sum->InPktsUnknownSCI,
2997			      MACSEC_SECY_STATS_ATTR_PAD) ||
2998	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
2999			      sum->InPktsNoSCI,
3000			      MACSEC_SECY_STATS_ATTR_PAD) ||
3001	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
3002			      sum->InPktsOverrun,
3003			      MACSEC_SECY_STATS_ATTR_PAD))
3004		return -EMSGSIZE;
3006	return 0;
3007 }
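/* Emit the MACSEC_ATTR_SECY nest describing the SecY: the cipher suite id
 * is derived from the configured key length and XPN mode, followed by the
 * frame protection/validation flags and the TX SC configuration.
 */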
3009 static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
3010 {
3011	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3012	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
3013							 MACSEC_ATTR_SECY);
3014	u64 csid;
3016	if (!secy_nest)
3017		return 1;
3019	switch (secy->key_len) {
3020	case MACSEC_GCM_AES_128_SAK_LEN:
3021		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
3022		break;
3023	case MACSEC_GCM_AES_256_SAK_LEN:
3024		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
3025		break;
3026	default:
3027		goto cancel;
3028	}
3030 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
3031 MACSEC_SECY_ATTR_PAD) ||
3032 nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
3033 csid, MACSEC_SECY_ATTR_PAD) ||
3034 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
3035 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
3036 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
3037 nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
3038 nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
3039 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
3040 nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
3041 nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
3042 nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
3043	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
3044		goto cancel;
3046	if (secy->replay_protect) {
3047		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
3048			goto cancel;
3049	}
3051	nla_nest_end(skb, secy_nest);
3052	return 0;
3054 cancel:
3055	nla_nest_cancel(skb, secy_nest);
3056	return 1;
3057 }
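/* Fill one netlink dump message for a single SecY: device attributes,
 * offload state, SecY/TX SC statistics, then nested lists of TX SAs and
 * RX SCs (each RX SC with its own nested SA list). Any nla_put failure
 * unwinds the open nests and cancels the message.
 */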
3059 static noinline_for_stack int
3060 dump_secy(struct macsec_secy *secy, struct net_device *dev,
3061	  struct sk_buff *skb, struct netlink_callback *cb)
3062 {
3063	struct macsec_tx_sc_stats tx_sc_stats = {0, };
3064	struct macsec_tx_sa_stats tx_sa_stats = {0, };
3065	struct macsec_rx_sc_stats rx_sc_stats = {0, };
3066	struct macsec_rx_sa_stats rx_sa_stats = {0, };
3067	struct macsec_dev *macsec = netdev_priv(dev);
3068	struct macsec_dev_stats dev_stats = {0, };
3069	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3070	struct nlattr *txsa_list, *rxsc_list;
3071	struct macsec_rx_sc *rx_sc;
3072	struct nlattr *attr;
3073	void *hdr;
3074	int i, j;
3076	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3077			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
3078	if (!hdr)
3079		return -EMSGSIZE;
3081	genl_dump_check_consistent(cb, hdr);
3083	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
3084		goto nla_put_failure;
3086	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
3087	if (!attr)
3088		goto nla_put_failure;
3089	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
3090		goto nla_put_failure;
3091	nla_nest_end(skb, attr);
3093	if (nla_put_secy(secy, skb))
3094		goto nla_put_failure;
3096	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
3097	if (!attr)
3098		goto nla_put_failure;
3100	get_tx_sc_stats(dev, &tx_sc_stats);
3101	if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
3102		nla_nest_cancel(skb, attr);
3103		goto nla_put_failure;
3104	}
3105	nla_nest_end(skb, attr);
3107	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
3108	if (!attr)
3109		goto nla_put_failure;
3110	get_secy_stats(dev, &dev_stats);
3111	if (copy_secy_stats(skb, &dev_stats)) {
3112		nla_nest_cancel(skb, attr);
3113		goto nla_put_failure;
3114	}
3115	nla_nest_end(skb, attr);
3117	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
3118	if (!txsa_list)
3119		goto nla_put_failure;
3120	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
3121		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
3122		struct nlattr *txsa_nest;
3123		u64 pn;
3124		int pn_len;
3126		if (!tx_sa)
3127			continue;
3129		txsa_nest = nla_nest_start_noflag(skb, j++);
3130		if (!txsa_nest) {
3131			nla_nest_cancel(skb, txsa_list);
3132			goto nla_put_failure;
3133		}
3135		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
3136		if (!attr) {
3137			nla_nest_cancel(skb, txsa_nest);
3138			nla_nest_cancel(skb, txsa_list);
3139			goto nla_put_failure;
3140		}
3141		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
3142		get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
3143		if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
3144			nla_nest_cancel(skb, attr);
3145			nla_nest_cancel(skb, txsa_nest);
3146			nla_nest_cancel(skb, txsa_list);
3147			goto nla_put_failure;
3148		}
3149		nla_nest_end(skb, attr);
3151		if (secy->xpn) {
3152			pn = tx_sa->next_pn;
3153			pn_len = MACSEC_XPN_PN_LEN;
3154		} else {
3155			pn = tx_sa->next_pn_halves.lower;
3156			pn_len = MACSEC_DEFAULT_PN_LEN;
3157		}
3159		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3160		    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3161		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
3162		    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
3163		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
3164			nla_nest_cancel(skb, txsa_nest);
3165			nla_nest_cancel(skb, txsa_list);
3166			goto nla_put_failure;
3167		}
3169		nla_nest_end(skb, txsa_nest);
3170	}
3171	nla_nest_end(skb, txsa_list);
3173	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
3174	if (!rxsc_list)
3175		goto nla_put_failure;
3177	j = 1;
3178	for_each_rxsc_rtnl(secy, rx_sc) {
3179		int k;
3180		struct nlattr *rxsa_list;
3181		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);
3183		if (!rxsc_nest) {
3184			nla_nest_cancel(skb, rxsc_list);
3185			goto nla_put_failure;
3186		}
3188		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
3189		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
3190				MACSEC_RXSC_ATTR_PAD)) {
3191			nla_nest_cancel(skb, rxsc_nest);
3192			nla_nest_cancel(skb, rxsc_list);
3193			goto nla_put_failure;
3194		}
3196		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
3197		if (!attr) {
3198			nla_nest_cancel(skb, rxsc_nest);
3199			nla_nest_cancel(skb, rxsc_list);
3200			goto nla_put_failure;
3201		}
3202		memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
3203		get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
3204		if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
3205			nla_nest_cancel(skb, attr);
3206			nla_nest_cancel(skb, rxsc_nest);
3207			nla_nest_cancel(skb, rxsc_list);
3208			goto nla_put_failure;
3209		}
3210		nla_nest_end(skb, attr);
3212		rxsa_list = nla_nest_start_noflag(skb,
3213						  MACSEC_RXSC_ATTR_SA_LIST);
3214		if (!rxsa_list) {
3215			nla_nest_cancel(skb, rxsc_nest);
3216			nla_nest_cancel(skb, rxsc_list);
3217			goto nla_put_failure;
3218		}
3220		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
3221			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
3222			struct nlattr *rxsa_nest;
3223			u64 pn;
3224			int pn_len;
3226			if (!rx_sa)
3227				continue;
3229			rxsa_nest = nla_nest_start_noflag(skb, k++);
3230			if (!rxsa_nest) {
3231				nla_nest_cancel(skb, rxsa_list);
3232				nla_nest_cancel(skb, rxsc_nest);
3233				nla_nest_cancel(skb, rxsc_list);
3234				goto nla_put_failure;
3235			}
3237			attr = nla_nest_start_noflag(skb,
3238						     MACSEC_SA_ATTR_STATS);
3239			if (!attr) {
3240				nla_nest_cancel(skb, rxsa_list);
3241				nla_nest_cancel(skb, rxsc_nest);
3242				nla_nest_cancel(skb, rxsc_list);
3243				goto nla_put_failure;
3244			}
3245			memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
3246			get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
3247			if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
3248				nla_nest_cancel(skb, attr);
3249				nla_nest_cancel(skb, rxsa_list);
3250				nla_nest_cancel(skb, rxsc_nest);
3251				nla_nest_cancel(skb, rxsc_list);
3252				goto nla_put_failure;
3253			}
3254			nla_nest_end(skb, attr);
3256			if (secy->xpn) {
3257				pn = rx_sa->next_pn;
3258				pn_len = MACSEC_XPN_PN_LEN;
3259			} else {
3260				pn = rx_sa->next_pn_halves.lower;
3261				pn_len = MACSEC_DEFAULT_PN_LEN;
3262			}
3264			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3265			    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3266			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
3267			    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
3268			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
3269				nla_nest_cancel(skb, rxsa_nest);
3270				nla_nest_cancel(skb, rxsc_nest);
3271				nla_nest_cancel(skb, rxsc_list);
3272				goto nla_put_failure;
3273			}
3274			nla_nest_end(skb, rxsa_nest);
3275		}
3277		nla_nest_end(skb, rxsa_list);
3278		nla_nest_end(skb, rxsc_nest);
3279	}
3281	nla_nest_end(skb, rxsc_list);
3283	genlmsg_end(skb, hdr);
3285	return 0;
3287 nla_put_failure:
3288	genlmsg_cancel(skb, hdr);
3289	return -EMSGSIZE;
3290 }
3292 static int macsec_generation = 1; /* protected by RTNL */
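/* Dump all MACsec devices in the namespace. macsec_generation is bumped on
 * every link creation/removal so that genl_dump_check_consistent() can flag
 * interrupted dumps as inconsistent to userspace.
 */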
3294 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
3295 {
3296	struct net *net = sock_net(skb->sk);
3297	struct net_device *dev;
3298	int dev_idx, d;
3300	dev_idx = cb->args[0];
3302	d = 0;
3303	rtnl_lock();
3305	cb->seq = macsec_generation;
3307	for_each_netdev(net, dev) {
3308		struct macsec_secy *secy;
3310		if (d < dev_idx)
3311			goto next;
3313		if (!netif_is_macsec(dev))
3314			goto next;
3316		secy = &macsec_priv(dev)->secy;
3317		if (dump_secy(secy, dev, skb, cb) < 0)
3318			goto done;
3319 next:
3320		d++;
3321	}
3323 done:
3324	rtnl_unlock();
3325	cb->args[0] = d;
3326	return skb->len;
3327 }
3329 static const struct genl_small_ops macsec_genl_ops[] = {
3330	{
3331		.cmd = MACSEC_CMD_GET_TXSC,
3332		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3333		.dumpit = macsec_dump_txsc,
3334	},
3335	{
3336		.cmd = MACSEC_CMD_ADD_RXSC,
3337		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3338		.doit = macsec_add_rxsc,
3339		.flags = GENL_ADMIN_PERM,
3340	},
3341	{
3342		.cmd = MACSEC_CMD_DEL_RXSC,
3343		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3344		.doit = macsec_del_rxsc,
3345		.flags = GENL_ADMIN_PERM,
3346	},
3347	{
3348		.cmd = MACSEC_CMD_UPD_RXSC,
3349		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3350		.doit = macsec_upd_rxsc,
3351		.flags = GENL_ADMIN_PERM,
3352	},
3353	{
3354		.cmd = MACSEC_CMD_ADD_TXSA,
3355		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3356		.doit = macsec_add_txsa,
3357		.flags = GENL_ADMIN_PERM,
3358	},
3359	{
3360		.cmd = MACSEC_CMD_DEL_TXSA,
3361		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3362		.doit = macsec_del_txsa,
3363		.flags = GENL_ADMIN_PERM,
3364	},
3365	{
3366		.cmd = MACSEC_CMD_UPD_TXSA,
3367		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3368		.doit = macsec_upd_txsa,
3369		.flags = GENL_ADMIN_PERM,
3370	},
3371	{
3372		.cmd = MACSEC_CMD_ADD_RXSA,
3373		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3374		.doit = macsec_add_rxsa,
3375		.flags = GENL_ADMIN_PERM,
3376	},
3377	{
3378		.cmd = MACSEC_CMD_DEL_RXSA,
3379		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3380		.doit = macsec_del_rxsa,
3381		.flags = GENL_ADMIN_PERM,
3382	},
3383	{
3384		.cmd = MACSEC_CMD_UPD_RXSA,
3385		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3386		.doit = macsec_upd_rxsa,
3387		.flags = GENL_ADMIN_PERM,
3388	},
3389	{
3390		.cmd = MACSEC_CMD_UPD_OFFLOAD,
3391		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3392		.doit = macsec_upd_offload,
3393		.flags = GENL_ADMIN_PERM,
3394	},
3395 };
3397 static struct genl_family macsec_fam __ro_after_init = {
3398	.name = MACSEC_GENL_NAME,
3399	.hdrsize = 0,
3400	.version = MACSEC_GENL_VERSION,
3401	.maxattr = MACSEC_ATTR_MAX,
3402	.policy = macsec_genl_policy,
3403	.netnsok = true,
3404	.module = THIS_MODULE,
3405	.small_ops = macsec_genl_ops,
3406	.n_small_ops = ARRAY_SIZE(macsec_genl_ops),
3407 };
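/* TX entry point. Offloaded devices hand the frame straight to the lower
 * device; otherwise the frame is either passed through untagged (when
 * protect_frames is off), dropped (SecY not operational), or encrypted
 * and counted before transmission on the real device.
 */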
3409 static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
3410				      struct net_device *dev)
3411 {
3412	struct macsec_dev *macsec = netdev_priv(dev);
3413	struct macsec_secy *secy = &macsec->secy;
3414	struct pcpu_secy_stats *secy_stats;
3415	int ret, len;
3417	if (macsec_is_offloaded(netdev_priv(dev))) {
3418		skb->dev = macsec->real_dev;
3419		return dev_queue_xmit(skb);
3420	}
3422	/* 10.5 */
3423	if (!secy->protect_frames) {
3424		secy_stats = this_cpu_ptr(macsec->stats);
3425		u64_stats_update_begin(&secy_stats->syncp);
3426		secy_stats->stats.OutPktsUntagged++;
3427		u64_stats_update_end(&secy_stats->syncp);
3428		skb->dev = macsec->real_dev;
3429		len = skb->len;
3430		ret = dev_queue_xmit(skb);
3431		count_tx(dev, ret, len);
3432		return ret;
3433	}
3435	if (!secy->operational) {
3436		kfree_skb(skb);
3437		dev->stats.tx_dropped++;
3438		return NETDEV_TX_OK;
3439	}
3441	len = skb->len;
3442	skb = macsec_encrypt(skb, dev);
3443	if (IS_ERR(skb)) {
3444		if (PTR_ERR(skb) != -EINPROGRESS)
3445			dev->stats.tx_dropped++;
3446		return NETDEV_TX_OK;
3447	}
3449	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
3451	macsec_encrypt_finish(skb, dev);
3452	ret = dev_queue_xmit(skb);
3453	count_tx(dev, ret, len);
3454	return ret;
3455 }
3457 #define SW_MACSEC_FEATURES \
3458 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
3460 /* If h/w offloading is enabled, use real device features save for
3461  *   VLAN_FEATURES - they require additional ops
3462  *   HW_MACSEC - no reason to report it
3463  */
3464 #define REAL_DEV_FEATURES(dev) \
3465	((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC))
3467 static int macsec_dev_init(struct net_device *dev)
3468 {
3469	struct macsec_dev *macsec = macsec_priv(dev);
3470	struct net_device *real_dev = macsec->real_dev;
3471	int err;
3473	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
3474	if (!dev->tstats)
3475		return -ENOMEM;
3477	err = gro_cells_init(&macsec->gro_cells, dev);
3478	if (err) {
3479		free_percpu(dev->tstats);
3480		return err;
3481	}
3483	if (macsec_is_offloaded(macsec)) {
3484		dev->features = REAL_DEV_FEATURES(real_dev);
3485	} else {
3486		dev->features = real_dev->features & SW_MACSEC_FEATURES;
3487		dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
3488	}
3490	dev->needed_headroom = real_dev->needed_headroom +
3491			       MACSEC_NEEDED_HEADROOM;
3492	dev->needed_tailroom = real_dev->needed_tailroom +
3493			       MACSEC_NEEDED_TAILROOM;
3495	if (is_zero_ether_addr(dev->dev_addr))
3496		eth_hw_addr_inherit(dev, real_dev);
3497	if (is_zero_ether_addr(dev->broadcast))
3498		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
3500	/* Get macsec's reference to real_dev */
3501	netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL);
3503	return 0;
3504 }
3506 static void macsec_dev_uninit(struct net_device *dev)
3507 {
3508	struct macsec_dev *macsec = macsec_priv(dev);
3510	gro_cells_destroy(&macsec->gro_cells);
3511	free_percpu(dev->tstats);
3512 }
3514 static netdev_features_t macsec_fix_features(struct net_device *dev,
3515					      netdev_features_t features)
3516 {
3517	struct macsec_dev *macsec = macsec_priv(dev);
3518	struct net_device *real_dev = macsec->real_dev;
3520	if (macsec_is_offloaded(macsec))
3521		return REAL_DEV_FEATURES(real_dev);
3523	features &= (real_dev->features & SW_MACSEC_FEATURES) |
3524		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
3525	features |= NETIF_F_LLTX;
3527	return features;
3528 }
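/* Opening the macsec device programs the unicast address (and, if needed,
 * allmulti/promiscuity) into the real device, lets an offloading driver
 * prepare via mdo_dev_open, and mirrors the lower device's carrier state.
 */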
3530 static int macsec_dev_open(struct net_device *dev)
3531 {
3532	struct macsec_dev *macsec = macsec_priv(dev);
3533	struct net_device *real_dev = macsec->real_dev;
3534	int err;
3536	err = dev_uc_add(real_dev, dev->dev_addr);
3537	if (err < 0)
3538		return err;
3540	if (dev->flags & IFF_ALLMULTI) {
3541		err = dev_set_allmulti(real_dev, 1);
3542		if (err < 0)
3543			goto del_unicast;
3544	}
3546	if (dev->flags & IFF_PROMISC) {
3547		err = dev_set_promiscuity(real_dev, 1);
3548		if (err < 0)
3549			goto clear_allmulti;
3550	}
3552	/* If h/w offloading is available, propagate to the device */
3553	if (macsec_is_offloaded(macsec)) {
3554		const struct macsec_ops *ops;
3555		struct macsec_context ctx;
3557		ops = macsec_get_ops(netdev_priv(dev), &ctx);
3558		if (!ops) {
3559			err = -EOPNOTSUPP;
3560			goto clear_allmulti;
3561		}
3563		ctx.secy = &macsec->secy;
3564		err = macsec_offload(ops->mdo_dev_open, &ctx);
3565		if (err)
3566			goto clear_allmulti;
3567	}
3569	if (netif_carrier_ok(real_dev))
3570		netif_carrier_on(dev);
3572	return 0;
3573 clear_allmulti:
3574	if (dev->flags & IFF_ALLMULTI)
3575		dev_set_allmulti(real_dev, -1);
3576 del_unicast:
3577	dev_uc_del(real_dev, dev->dev_addr);
3578	netif_carrier_off(dev);
3579	return err;
3580 }
3582 static int macsec_dev_stop(struct net_device *dev)
3583 {
3584	struct macsec_dev *macsec = macsec_priv(dev);
3585	struct net_device *real_dev = macsec->real_dev;
3587	netif_carrier_off(dev);
3589	/* If h/w offloading is available, propagate to the device */
3590	if (macsec_is_offloaded(macsec)) {
3591		const struct macsec_ops *ops;
3592		struct macsec_context ctx;
3594		ops = macsec_get_ops(macsec, &ctx);
3595		if (ops) {
3596			ctx.secy = &macsec->secy;
3597			macsec_offload(ops->mdo_dev_stop, &ctx);
3598		}
3599	}
3601	dev_mc_unsync(real_dev, dev);
3602	dev_uc_unsync(real_dev, dev);
3604	if (dev->flags & IFF_ALLMULTI)
3605		dev_set_allmulti(real_dev, -1);
3607	if (dev->flags & IFF_PROMISC)
3608		dev_set_promiscuity(real_dev, -1);
3610	dev_uc_del(real_dev, dev->dev_addr);
3612	return 0;
3613 }
3615 static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
3616 {
3617	struct net_device *real_dev = macsec_priv(dev)->real_dev;
3619	if (!(dev->flags & IFF_UP))
3620		return;
3622	if (change & IFF_ALLMULTI)
3623		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
3625	if (change & IFF_PROMISC)
3626		dev_set_promiscuity(real_dev,
3627				    dev->flags & IFF_PROMISC ? 1 : -1);
3628 }
3630 static void macsec_dev_set_rx_mode(struct net_device *dev)
3631 {
3632	struct net_device *real_dev = macsec_priv(dev)->real_dev;
3634	dev_mc_sync(real_dev, dev);
3635	dev_uc_sync(real_dev, dev);
3636 }
3638 static int macsec_set_mac_address(struct net_device *dev, void *p)
3639 {
3640	struct macsec_dev *macsec = macsec_priv(dev);
3641	struct net_device *real_dev = macsec->real_dev;
3642	struct sockaddr *addr = p;
3643	int err;
3645	if (!is_valid_ether_addr(addr->sa_data))
3646		return -EADDRNOTAVAIL;
3648	if (!(dev->flags & IFF_UP))
3649		goto out;
3651	err = dev_uc_add(real_dev, addr->sa_data);
3652	if (err < 0)
3653		return err;
3655	dev_uc_del(real_dev, dev->dev_addr);
3657 out:
3658	eth_hw_addr_set(dev, addr->sa_data);
3660	/* If h/w offloading is available, propagate to the device */
3661	if (macsec_is_offloaded(macsec)) {
3662		const struct macsec_ops *ops;
3663		struct macsec_context ctx;
3665		ops = macsec_get_ops(macsec, &ctx);
3666		if (ops) {
3667			ctx.secy = &macsec->secy;
3668			macsec_offload(ops->mdo_upd_secy, &ctx);
3669		}
3670	}
3672	return 0;
3673 }
3675 static int macsec_change_mtu(struct net_device *dev, int new_mtu)
3676 {
3677	struct macsec_dev *macsec = macsec_priv(dev);
3678	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);
3680	if (macsec->real_dev->mtu - extra < new_mtu)
3681		return -ERANGE;
3683	dev->mtu = new_mtu;
3685	return 0;
3686 }
3688 static void macsec_get_stats64(struct net_device *dev,
3689				struct rtnl_link_stats64 *s)
3690 {
3691	if (!dev->tstats)
3692		return;
3694	dev_fetch_sw_netstats(s, dev->tstats);
3696	s->rx_dropped = dev->stats.rx_dropped;
3697	s->tx_dropped = dev->stats.tx_dropped;
3698	s->rx_errors = dev->stats.rx_errors;
3699 }
3701 static int macsec_get_iflink(const struct net_device *dev)
3702 {
3703	return macsec_priv(dev)->real_dev->ifindex;
3704 }
3706 static const struct net_device_ops macsec_netdev_ops = {
3707 .ndo_init = macsec_dev_init,
3708 .ndo_uninit = macsec_dev_uninit,
3709 .ndo_open = macsec_dev_open,
3710 .ndo_stop = macsec_dev_stop,
3711 .ndo_fix_features = macsec_fix_features,
3712 .ndo_change_mtu = macsec_change_mtu,
3713 .ndo_set_rx_mode = macsec_dev_set_rx_mode,
3714 .ndo_change_rx_flags = macsec_dev_change_rx_flags,
3715 .ndo_set_mac_address = macsec_set_mac_address,
3716 .ndo_start_xmit = macsec_start_xmit,
3717 .ndo_get_stats64 = macsec_get_stats64,
3718	.ndo_get_iflink = macsec_get_iflink,
3719 };
3721 static const struct device_type macsec_type = {
3722	.name = "macsec",
3723 };
3725 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
3726 [IFLA_MACSEC_SCI] = { .type = NLA_U64 },
3727 [IFLA_MACSEC_PORT] = { .type = NLA_U16 },
3728 [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
3729 [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
3730 [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
3731 [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
3732 [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
3733 [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
3734 [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
3735 [IFLA_MACSEC_ES] = { .type = NLA_U8 },
3736 [IFLA_MACSEC_SCB] = { .type = NLA_U8 },
3737 [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
3738	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
3739	[IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
3740 };
3741 static void macsec_free_netdev(struct net_device *dev)
3742 {
3743	struct macsec_dev *macsec = macsec_priv(dev);
3745	free_percpu(macsec->stats);
3746	free_percpu(macsec->secy.tx_sc.stats);
3748	/* Get rid of the macsec's reference to real_dev */
3749	netdev_put(macsec->real_dev, &macsec->dev_tracker);
3750 }
3752 static void macsec_setup(struct net_device *dev)
3753 {
3754	ether_setup(dev);
3755	dev->min_mtu = 0;
3756	dev->max_mtu = ETH_MAX_MTU;
3757	dev->priv_flags |= IFF_NO_QUEUE;
3758	dev->netdev_ops = &macsec_netdev_ops;
3759	dev->needs_free_netdev = true;
3760	dev->priv_destructor = macsec_free_netdev;
3761	SET_NETDEV_DEVTYPE(dev, &macsec_type);
3763	eth_zero_addr(dev->broadcast);
3764 }
3766 static int macsec_changelink_common(struct net_device *dev,
3767				     struct nlattr *data[])
3768 {
3769	struct macsec_secy *secy;
3770	struct macsec_tx_sc *tx_sc;
3772	secy = &macsec_priv(dev)->secy;
3773	tx_sc = &secy->tx_sc;
3775	if (data[IFLA_MACSEC_ENCODING_SA]) {
3776		struct macsec_tx_sa *tx_sa;
3778		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
3779		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);
3781		secy->operational = tx_sa && tx_sa->active;
3782	}
3784 if (data[IFLA_MACSEC_ENCRYPT])
3785 tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
3787 if (data[IFLA_MACSEC_PROTECT])
3788 secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);
3790 if (data[IFLA_MACSEC_INC_SCI])
3791 tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
3793 if (data[IFLA_MACSEC_ES])
3794 tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);
3796 if (data[IFLA_MACSEC_SCB])
3797 tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);
3799 if (data[IFLA_MACSEC_REPLAY_PROTECT])
3800 secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);
3802 if (data[IFLA_MACSEC_VALIDATION])
3803 secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
3805	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
3806		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
3807		case MACSEC_CIPHER_ID_GCM_AES_128:
3808		case MACSEC_DEFAULT_CIPHER_ID:
3809			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3810			secy->xpn = 0;
3811			break;
3812		case MACSEC_CIPHER_ID_GCM_AES_256:
3813			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3814			secy->xpn = 0;
3815			break;
3816		case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
3817			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3818			secy->xpn = 1;
3819			break;
3820		case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
3821			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3822			secy->xpn = 1;
3823			break;
3824		default:
3825			return -EINVAL;
3826		}
3827	}
3829	if (data[IFLA_MACSEC_WINDOW]) {
3830		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
3832		/* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
3833		 * for XPN cipher suites */
3834		if (secy->xpn &&
3835		    secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
3836			return -EINVAL;
3837	}
3839	return 0;
3840 }
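/* changelink keeps an on-stack copy of the current secy/tx_sc so that all
 * of macsec_changelink_common() can be reverted if an offloading device
 * rejects the update.
 */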
3842 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
3843			      struct nlattr *data[],
3844			      struct netlink_ext_ack *extack)
3845 {
3846	struct macsec_dev *macsec = macsec_priv(dev);
3847	struct macsec_tx_sc tx_sc;
3848	struct macsec_secy secy;
3849	int ret;
3851	if (!data)
3852		return 0;
3854	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
3855	    data[IFLA_MACSEC_ICV_LEN] ||
3856	    data[IFLA_MACSEC_SCI] ||
3857	    data[IFLA_MACSEC_PORT])
3858		return -EINVAL;
3860	/* Keep a copy of unmodified secy and tx_sc, in case the offload
3861	 * propagation fails, to revert macsec_changelink_common.
3862	 */
3863	memcpy(&secy, &macsec->secy, sizeof(secy));
3864	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));
3866	ret = macsec_changelink_common(dev, data);
3867	if (ret)
3868		goto cleanup;
3870 /* If h/w offloading is available, propagate to the device */
3871	if (macsec_is_offloaded(macsec)) {
3872		const struct macsec_ops *ops;
3873		struct macsec_context ctx;
3876		ops = macsec_get_ops(netdev_priv(dev), &ctx);
3877		if (!ops) {
3878			ret = -EOPNOTSUPP;
3879			goto cleanup;
3880		}
3882		ctx.secy = &macsec->secy;
3883		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
3884		if (ret)
3885			goto cleanup;
3886	}
3888	return 0;
3890 cleanup:
3891	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
3892	memcpy(&macsec->secy, &secy, sizeof(secy));
3894	return ret;
3895 }
3897 static void macsec_del_dev(struct macsec_dev *macsec)
3898 {
3899	int i;
3901	while (macsec->secy.rx_sc) {
3902		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);
3904		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
3905		free_rx_sc(rx_sc);
3906	}
3908	for (i = 0; i < MACSEC_NUM_AN; i++) {
3909		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);
3911		if (sa) {
3912			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
3913			clear_tx_sa(sa);
3914		}
3915	}
3916 }
3918 static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
3919 {
3920	struct macsec_dev *macsec = macsec_priv(dev);
3921	struct net_device *real_dev = macsec->real_dev;
3923	/* If h/w offloading is available, propagate to the device */
3924	if (macsec_is_offloaded(macsec)) {
3925		const struct macsec_ops *ops;
3926		struct macsec_context ctx;
3928		ops = macsec_get_ops(netdev_priv(dev), &ctx);
3929		if (ops) {
3930			ctx.secy = &macsec->secy;
3931			macsec_offload(ops->mdo_del_secy, &ctx);
3932		}
3933	}
3935	unregister_netdevice_queue(dev, head);
3936	list_del_rcu(&macsec->secys);
3937	macsec_del_dev(macsec);
3938	netdev_upper_dev_unlink(real_dev, dev);
3940	macsec_generation++;
3941 }
3943 static void macsec_dellink(struct net_device *dev, struct list_head *head)
3944 {
3945	struct macsec_dev *macsec = macsec_priv(dev);
3946	struct net_device *real_dev = macsec->real_dev;
3947	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3949	macsec_common_dellink(dev, head);
3951	if (list_empty(&rxd->secys)) {
3952		netdev_rx_handler_unregister(real_dev);
3953		kfree(rxd);
3954	}
3955 }
3957 static int register_macsec_dev(struct net_device *real_dev,
3958				struct net_device *dev)
3959 {
3960	struct macsec_dev *macsec = macsec_priv(dev);
3961	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3963	if (!rxd) {
3964		int err;
3966		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
3967		if (!rxd)
3968			return -ENOMEM;
3970		INIT_LIST_HEAD(&rxd->secys);
3972		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
3973						 rxd);
3974		if (err < 0) {
3975			kfree(rxd);
3976			return err;
3977		}
3978	}
3980	list_add_tail_rcu(&macsec->secys, &rxd->secys);
3981	return 0;
3982 }
3984 static bool sci_exists(struct net_device *dev, sci_t sci)
3985 {
3986	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
3987	struct macsec_dev *macsec;
3989	list_for_each_entry(macsec, &rxd->secys, secys) {
3990		if (macsec->secy.sci == sci)
3991			return true;
3992	}
3994	return false;
3995 }
3997 static sci_t dev_to_sci(struct net_device *dev, __be16 port)
3998 {
3999	return make_sci(dev->dev_addr, port);
4000 }
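/* Initialize a new SecY with the driver defaults (GCM-AES-128 key length,
 * strict validation, frame protection on, replay protection off). If no
 * SCI was supplied, one is derived from the MAC address and the ES port.
 */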
4002 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
4003 {
4004	struct macsec_dev *macsec = macsec_priv(dev);
4005	struct macsec_secy *secy = &macsec->secy;
4007	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
4008	if (!macsec->stats)
4009		return -ENOMEM;
4011	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
4012	if (!secy->tx_sc.stats) {
4013		free_percpu(macsec->stats);
4014		return -ENOMEM;
4015	}
4017	if (sci == MACSEC_UNDEF_SCI)
4018		sci = dev_to_sci(dev, MACSEC_PORT_ES);
4020	secy->netdev = dev;
4021	secy->operational = true;
4022	secy->key_len = DEFAULT_SAK_LEN;
4023	secy->icv_len = icv_len;
4024	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
4025	secy->protect_frames = true;
4026	secy->replay_protect = false;
4027	secy->xpn = DEFAULT_XPN;
4029	secy->sci = sci;
4030	secy->tx_sc.active = true;
4031	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
4032	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
4033	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
4034	secy->tx_sc.end_station = false;
4035	secy->tx_sc.scb = false;
4037	return 0;
4038 }
4040 static struct lock_class_key macsec_netdev_addr_lock_key;
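/* Create a macsec device on top of a lower Ethernet device. The link is
 * registered, stacked on the real device, given its SCI, and the shared
 * rx_handler is installed on first use.
 *
 * Illustrative iproute2 usage (eth0 as the lower device is an assumption):
 *
 *   ip link add link eth0 macsec0 type macsec encrypt on
 *   ip macsec add macsec0 tx sa 0 pn 1 on key 01 <128-bit hex key>
 */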
4042 static int macsec_newlink(struct net *net, struct net_device *dev,
4043			   struct nlattr *tb[], struct nlattr *data[],
4044			   struct netlink_ext_ack *extack)
4045 {
4046	struct macsec_dev *macsec = macsec_priv(dev);
4047	rx_handler_func_t *rx_handler;
4048	u8 icv_len = DEFAULT_ICV_LEN;
4049	struct net_device *real_dev;
4050	int err, mtu;
4051	sci_t sci;
4053	if (!tb[IFLA_LINK])
4054		return -EINVAL;
4055	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
4056	if (!real_dev)
4057		return -ENODEV;
4058	if (real_dev->type != ARPHRD_ETHER)
4059		return -EINVAL;
4061	dev->priv_flags |= IFF_MACSEC;
4063	macsec->real_dev = real_dev;
4065	if (data && data[IFLA_MACSEC_OFFLOAD])
4066		macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
4067	else
4068		/* MACsec offloading is off by default */
4069		macsec->offload = MACSEC_OFFLOAD_OFF;
4071	/* Check if the offloading mode is supported by the underlying layers */
4072	if (macsec->offload != MACSEC_OFFLOAD_OFF &&
4073	    !macsec_check_offload(macsec->offload, macsec))
4074		return -EOPNOTSUPP;
4076	/* send_sci must be set to true when transmit sci explicitly is set */
4077	if ((data && data[IFLA_MACSEC_SCI]) &&
4078	    (data && data[IFLA_MACSEC_INC_SCI])) {
4079		u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
4081		if (!send_sci)
4082			return -EINVAL;
4083	}
4085	if (data && data[IFLA_MACSEC_ICV_LEN])
4086		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
4087	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
4088	if (mtu < 0)
4089		dev->mtu = 0;
4090	else
4091		dev->mtu = mtu;
4093	rx_handler = rtnl_dereference(real_dev->rx_handler);
4094	if (rx_handler && rx_handler != macsec_handle_frame)
4095		return -EBUSY;
4097	err = register_netdevice(dev);
4098	if (err < 0)
4099		return err;
4101	netdev_lockdep_set_classes(dev);
4102	lockdep_set_class(&dev->addr_list_lock,
4103			  &macsec_netdev_addr_lock_key);
4105	err = netdev_upper_dev_link(real_dev, dev, extack);
4106	if (err < 0)
4107		goto unregister;
4109	/* need to be already registered so that ->init has run and
4110	 * the MAC addr is set
4111	 */
4112	if (data && data[IFLA_MACSEC_SCI])
4113		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
4114	else if (data && data[IFLA_MACSEC_PORT])
4115		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
4116	else
4117		sci = dev_to_sci(dev, MACSEC_PORT_ES);
4119	if (rx_handler && sci_exists(real_dev, sci)) {
4120		err = -EBUSY;
4121		goto unlink;
4122	}
4124	err = macsec_add_dev(dev, sci, icv_len);
4125	if (err)
4126		goto unlink;
4128	if (data) {
4129		err = macsec_changelink_common(dev, data);
4130		if (err)
4131			goto del_dev;
4132	}
4134	/* If h/w offloading is available, propagate to the device */
4135	if (macsec_is_offloaded(macsec)) {
4136		const struct macsec_ops *ops;
4137		struct macsec_context ctx;
4139		ops = macsec_get_ops(macsec, &ctx);
4140		if (ops) {
4141			ctx.secy = &macsec->secy;
4142			err = macsec_offload(ops->mdo_add_secy, &ctx);
4143			if (err)
4144				goto del_dev;
4145		}
4146	}
4148	err = register_macsec_dev(real_dev, dev);
4149	if (err)
4150		goto del_dev;
4152	netif_stacked_transfer_operstate(real_dev, dev);
4153	linkwatch_fire_event(dev);
4155	macsec_generation++;
4157	return 0;
4159 del_dev:
4160	macsec_del_dev(macsec);
4161 unlink:
4162	netdev_upper_dev_unlink(real_dev, dev);
4163 unregister:
4164	unregister_netdevice(dev);
4165	return err;
4166 }
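/* Validate netlink attributes before link creation: known cipher suite and
 * ICV length (probed by allocating a throwaway AEAD transform), a valid
 * encoding SA, mutually exclusive ES/SCB/SCI header flags, and a replay
 * window whenever replay protection is requested.
 */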
4168 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
4169				 struct netlink_ext_ack *extack)
4170 {
4171	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
4172	u8 icv_len = DEFAULT_ICV_LEN;
4173	int flag;
4174	bool es, scb, sci;
4176	if (!data)
4177		return 0;
4179	if (data[IFLA_MACSEC_CIPHER_SUITE])
4180		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
4182	if (data[IFLA_MACSEC_ICV_LEN]) {
4183		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
4184		if (icv_len != DEFAULT_ICV_LEN) {
4185			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
4186			struct crypto_aead *dummy_tfm;
4188			dummy_tfm = macsec_alloc_tfm(dummy_key,
4189						     DEFAULT_SAK_LEN,
4190						     icv_len);
4191			if (IS_ERR(dummy_tfm))
4192				return PTR_ERR(dummy_tfm);
4193			crypto_free_aead(dummy_tfm);
4194		}
4195	}
4197	switch (csid) {
4198	case MACSEC_CIPHER_ID_GCM_AES_128:
4199	case MACSEC_CIPHER_ID_GCM_AES_256:
4200	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
4201	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
4202	case MACSEC_DEFAULT_CIPHER_ID:
4203		if (icv_len < MACSEC_MIN_ICV_LEN ||
4204		    icv_len > MACSEC_STD_ICV_LEN)
4205			return -EINVAL;
4206		break;
4207	default:
4208		return -EINVAL;
4209	}
4211	if (data[IFLA_MACSEC_ENCODING_SA]) {
4212		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
4213			return -EINVAL;
4214	}
4216	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
4217	     flag < IFLA_MACSEC_VALIDATION;
4218	     flag++) {
4219		if (data[flag]) {
4220			if (nla_get_u8(data[flag]) > 1)
4221				return -EINVAL;
4222		}
4223	}
4225	es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
4226	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
4227	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;
4229	if ((sci && (scb || es)) || (scb && es))
4230		return -EINVAL;
4232	if (data[IFLA_MACSEC_VALIDATION] &&
4233	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
4234		return -EINVAL;
4236	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
4237	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
4238	    !data[IFLA_MACSEC_WINDOW])
4239		return -EINVAL;
4241	return 0;
4242 }
4244 static struct net *macsec_get_link_net(const struct net_device *dev)
4245 {
4246	return dev_net(macsec_priv(dev)->real_dev);
4247 }
4249 static size_t macsec_get_size(const struct net_device *dev)
4250 {
4251 return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
4252 nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
4253 nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
4254 nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
4255 nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
4256 nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
4257 nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
4258 nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
4259 nla_total_size(1) + /* IFLA_MACSEC_ES */
4260 nla_total_size(1) + /* IFLA_MACSEC_SCB */
4261 nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
4262	       nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
4263	       0;
4264 }
4266 static int macsec_fill_info(struct sk_buff *skb,
4267			     const struct net_device *dev)
4268 {
4269	struct macsec_secy *secy = &macsec_priv(dev)->secy;
4270	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
4271	u64 csid;
4273	switch (secy->key_len) {
4274	case MACSEC_GCM_AES_128_SAK_LEN:
4275		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
4276		break;
4277	case MACSEC_GCM_AES_256_SAK_LEN:
4278		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
4279		break;
4280	default:
4281		goto nla_put_failure;
4282	}
4284	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
4285			IFLA_MACSEC_PAD) ||
4286	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
4287 nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
4288 csid, IFLA_MACSEC_PAD) ||
4289 nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
4290 nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
4291 nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
4292 nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
4293 nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
4294 nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
4295 nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
4296	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
4297	    0)
4298		goto nla_put_failure;
4300	if (secy->replay_protect) {
4301		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
4302			goto nla_put_failure;
4303	}
4305	return 0;
4307 nla_put_failure:
4308	return -EMSGSIZE;
4309 }
4311 static struct rtnl_link_ops macsec_link_ops __read_mostly = {
4312	.kind = "macsec",
4313	.priv_size = sizeof(struct macsec_dev),
4314	.maxtype = IFLA_MACSEC_MAX,
4315	.policy = macsec_rtnl_policy,
4316	.setup = macsec_setup,
4317	.validate = macsec_validate_attr,
4318	.newlink = macsec_newlink,
4319	.changelink = macsec_changelink,
4320	.dellink = macsec_dellink,
4321	.get_size = macsec_get_size,
4322	.fill_info = macsec_fill_info,
4323	.get_link_net = macsec_get_link_net,
4324 };
4326 static bool is_macsec_master(struct net_device *dev)
4327 {
4328	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
4329 }
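/* React to events on an underlying device that carries MACsec interfaces:
 * propagate operstate changes, tear every stacked macsec device down on
 * unregister, and clamp each device's MTU when the lower MTU shrinks.
 */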
4331 static int macsec_notify(struct notifier_block *this, unsigned long event,
4332			  void *ptr)
4333 {
4334	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
4335	LIST_HEAD(head);
4337	if (!is_macsec_master(real_dev))
4338		return NOTIFY_DONE;
4340	switch (event) {
4341	case NETDEV_DOWN:
4342	case NETDEV_UP:
4343	case NETDEV_CHANGE: {
4344		struct macsec_dev *m, *n;
4345		struct macsec_rxh_data *rxd;
4347		rxd = macsec_data_rtnl(real_dev);
4348		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
4349			struct net_device *dev = m->secy.netdev;
4351			netif_stacked_transfer_operstate(real_dev, dev);
4352		}
4353		break;
4354	}
4355	case NETDEV_UNREGISTER: {
4356		struct macsec_dev *m, *n;
4357		struct macsec_rxh_data *rxd;
4359		rxd = macsec_data_rtnl(real_dev);
4360		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
4361			macsec_common_dellink(m->secy.netdev, &head);
4362		}
4364		netdev_rx_handler_unregister(real_dev);
4365		kfree(rxd);
4367		unregister_netdevice_many(&head);
4368		break;
4369	}
4370	case NETDEV_CHANGEMTU: {
4371		struct macsec_dev *m;
4372		struct macsec_rxh_data *rxd;
4374		rxd = macsec_data_rtnl(real_dev);
4375		list_for_each_entry(m, &rxd->secys, secys) {
4376			struct net_device *dev = m->secy.netdev;
4377			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
4378							    macsec_extra_len(true));
4380			if (dev->mtu > mtu)
4381				dev_set_mtu(dev, mtu);
4382		}
4383		break;
4384	}
4385	}
4387	return NOTIFY_OK;
4388 }
4389 static struct notifier_block macsec_notifier = {
4390	.notifier_call = macsec_notify,
4391 };
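/* Module init/exit: the netdevice notifier, the rtnl link ops and the
 * generic netlink family are registered in that order and unwound in
 * reverse on failure or unload.
 */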
4393 static int __init macsec_init(void)
4394 {
4395	int err;
4397	pr_info("MACsec IEEE 802.1AE\n");
4398	err = register_netdevice_notifier(&macsec_notifier);
4399	if (err)
4400		return err;
4402	err = rtnl_link_register(&macsec_link_ops);
4403	if (err)
4404		goto notifier;
4406	err = genl_register_family(&macsec_fam);
4407	if (err)
4408		goto rtnl;
4410	return 0;
4412 rtnl:
4413	rtnl_link_unregister(&macsec_link_ops);
4414 notifier:
4415	unregister_netdevice_notifier(&macsec_notifier);
4416	return err;
4417 }
4419 static void __exit macsec_exit(void)
4420 {
4421	genl_unregister_family(&macsec_fam);
4422	rtnl_link_unregister(&macsec_link_ops);
4423	unregister_netdevice_notifier(&macsec_notifier);
4424	rcu_barrier();
4425 }
4427 module_init(macsec_init);
4428 module_exit(macsec_exit);
4430 MODULE_ALIAS_RTNL_LINK("macsec");
4431 MODULE_ALIAS_GENL_FAMILY("macsec");
4433 MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
4434 MODULE_LICENSE("GPL v2");