// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Fraunhofer ITWM
 *
 * Written by:
 * Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>
 */

#include <linux/err.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/ieee802154.h>
#include <linux/rculist.h>

#include <crypto/aead.h>
#include <crypto/skcipher.h>

#include "ieee802154_i.h"
#include "llsec.h"

static void llsec_key_put(struct mac802154_llsec_key *key);
static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a,
			       const struct ieee802154_llsec_key_id *b);

static void llsec_dev_free(struct mac802154_llsec_device *dev);

void mac802154_llsec_init(struct mac802154_llsec *sec)
{
	memset(sec, 0, sizeof(*sec));

	memset(&sec->params.default_key_source, 0xFF, IEEE802154_ADDR_LEN);

	INIT_LIST_HEAD(&sec->table.security_levels);
	INIT_LIST_HEAD(&sec->table.devices);
	INIT_LIST_HEAD(&sec->table.keys);
	hash_init(sec->devices_short);
	hash_init(sec->devices_hw);
	rwlock_init(&sec->lock);
}

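/* Tear down an llsec instance: free all configured security levels and
 * device entries, then drop the reference on every key entry. Assumes no
 * other context still uses the tables (e.g. the interface is going away).
 */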
void mac802154_llsec_destroy(struct mac802154_llsec *sec)
{
	struct ieee802154_llsec_seclevel *sl, *sn;
	struct ieee802154_llsec_device *dev, *dn;
	struct ieee802154_llsec_key_entry *key, *kn;

	list_for_each_entry_safe(sl, sn, &sec->table.security_levels, list) {
		struct mac802154_llsec_seclevel *msl;

		msl = container_of(sl, struct mac802154_llsec_seclevel, level);
		list_del(&sl->list);
		kfree_sensitive(msl);
	}

	list_for_each_entry_safe(dev, dn, &sec->table.devices, list) {
		struct mac802154_llsec_device *mdev;

		mdev = container_of(dev, struct mac802154_llsec_device, dev);
		list_del(&dev->list);
		llsec_dev_free(mdev);
	}

	list_for_each_entry_safe(key, kn, &sec->table.keys, list) {
		struct mac802154_llsec_key *mkey;

		mkey = container_of(key->key, struct mac802154_llsec_key, key);
		list_del(&key->list);
		llsec_key_put(mkey);
		kfree(key);
	}
}

int mac802154_llsec_get_params(struct mac802154_llsec *sec,
			       struct ieee802154_llsec_params *params)
{
	read_lock_bh(&sec->lock);
	*params = sec->params;
	read_unlock_bh(&sec->lock);

	return 0;
}

int mac802154_llsec_set_params(struct mac802154_llsec *sec,
			       const struct ieee802154_llsec_params *params,
			       int changed)
{
	write_lock_bh(&sec->lock);

	if (changed & IEEE802154_LLSEC_PARAM_ENABLED)
		sec->params.enabled = params->enabled;
	if (changed & IEEE802154_LLSEC_PARAM_FRAME_COUNTER)
		sec->params.frame_counter = params->frame_counter;
	if (changed & IEEE802154_LLSEC_PARAM_OUT_LEVEL)
		sec->params.out_level = params->out_level;
	if (changed & IEEE802154_LLSEC_PARAM_OUT_KEY)
		sec->params.out_key = params->out_key;
	if (changed & IEEE802154_LLSEC_PARAM_KEY_SOURCE)
		sec->params.default_key_source = params->default_key_source;
	if (changed & IEEE802154_LLSEC_PARAM_PAN_ID)
		sec->params.pan_id = params->pan_id;
	if (changed & IEEE802154_LLSEC_PARAM_HWADDR)
		sec->params.hwaddr = params->hwaddr;
	if (changed & IEEE802154_LLSEC_PARAM_COORD_HWADDR)
		sec->params.coord_hwaddr = params->coord_hwaddr;
	if (changed & IEEE802154_LLSEC_PARAM_COORD_SHORTADDR)
		sec->params.coord_shortaddr = params->coord_shortaddr;

	write_unlock_bh(&sec->lock);

	return 0;
}

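/* Allocate the crypto transforms backing one llsec key: a "ccm(aes)" AEAD
 * instance per supported authentication tag size (4, 8 and 16 bytes) and a
 * "ctr(aes)" sync skcipher for the encryption-only security level.
 */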
static struct mac802154_llsec_key*
llsec_key_alloc(const struct ieee802154_llsec_key *template)
{
	const int authsizes[3] = { 4, 8, 16 };
	struct mac802154_llsec_key *key;
	int i;

	key = kzalloc(sizeof(*key), GFP_KERNEL);
	if (!key)
		return NULL;

	kref_init(&key->ref);
	key->key = *template;

	BUILD_BUG_ON(ARRAY_SIZE(authsizes) != ARRAY_SIZE(key->tfm));

	for (i = 0; i < ARRAY_SIZE(key->tfm); i++) {
		key->tfm[i] = crypto_alloc_aead("ccm(aes)", 0,
						CRYPTO_ALG_ASYNC);
		if (IS_ERR(key->tfm[i]))
			goto err_tfm;
		if (crypto_aead_setkey(key->tfm[i], template->key,
				       IEEE802154_LLSEC_KEY_SIZE))
			goto err_tfm;
		if (crypto_aead_setauthsize(key->tfm[i], authsizes[i]))
			goto err_tfm;
	}

	key->tfm0 = crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
	if (IS_ERR(key->tfm0))
		goto err_tfm;

	if (crypto_sync_skcipher_setkey(key->tfm0, template->key,
					IEEE802154_LLSEC_KEY_SIZE))
		goto err_tfm0;

	return key;

err_tfm0:
	crypto_free_sync_skcipher(key->tfm0);
err_tfm:
	for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
		if (!IS_ERR_OR_NULL(key->tfm[i]))
			crypto_free_aead(key->tfm[i]);

	kfree_sensitive(key);
	return NULL;
}

static void llsec_key_release(struct kref *ref)
{
	struct mac802154_llsec_key *key;
	int i;

	key = container_of(ref, struct mac802154_llsec_key, ref);

	for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
		crypto_free_aead(key->tfm[i]);

	crypto_free_sync_skcipher(key->tfm0);
	kfree_sensitive(key);
}

static struct mac802154_llsec_key*
llsec_key_get(struct mac802154_llsec_key *key)
{
	kref_get(&key->ref);
	return key;
}

static void llsec_key_put(struct mac802154_llsec_key *key)
{
	kref_put(&key->ref, llsec_key_release);
}

static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a,
			       const struct ieee802154_llsec_key_id *b)
{
	if (a->mode != b->mode)
		return false;

	if (a->mode == IEEE802154_SCF_KEY_IMPLICIT)
		return ieee802154_addr_equal(&a->device_addr, &b->device_addr);

	if (a->id != b->id)
		return false;

	switch (a->mode) {
	case IEEE802154_SCF_KEY_INDEX:
		return true;
	case IEEE802154_SCF_KEY_SHORT_INDEX:
		return a->short_source == b->short_source;
	case IEEE802154_SCF_KEY_HW_INDEX:
		return a->extended_source == b->extended_source;
	}

	return false;
}

int mac802154_llsec_key_add(struct mac802154_llsec *sec,
			    const struct ieee802154_llsec_key_id *id,
			    const struct ieee802154_llsec_key *key)
{
	struct mac802154_llsec_key *mkey = NULL;
	struct ieee802154_llsec_key_entry *pos, *new;

	if (!(key->frame_types & (1 << IEEE802154_FC_TYPE_MAC_CMD)) &&
	    key->cmd_frame_ids)
		return -EINVAL;

	list_for_each_entry(pos, &sec->table.keys, list) {
		if (llsec_key_id_equal(&pos->id, id))
			return -EEXIST;

		if (memcmp(pos->key->key, key->key,
			   IEEE802154_LLSEC_KEY_SIZE))
			continue;

		mkey = container_of(pos->key, struct mac802154_llsec_key, key);

		/* Don't allow multiple instances of the same AES key to have
		 * different allowed frame types/command frame ids, as this is
		 * not possible in the 802.15.4 PIB.
		 */
		if (pos->key->frame_types != key->frame_types ||
		    pos->key->cmd_frame_ids != key->cmd_frame_ids)
			return -EEXIST;

		break;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (!mkey)
		mkey = llsec_key_alloc(key);
	else
		mkey = llsec_key_get(mkey);

	if (!mkey)
		goto fail;

	new->id = *id;
	new->key = &mkey->key;

	list_add_rcu(&new->list, &sec->table.keys);

	return 0;

fail:
	kfree_sensitive(new);
	return -ENOMEM;
}

int mac802154_llsec_key_del(struct mac802154_llsec *sec,
			    const struct ieee802154_llsec_key_id *key)
{
	struct ieee802154_llsec_key_entry *pos;

	list_for_each_entry(pos, &sec->table.keys, list) {
		struct mac802154_llsec_key *mkey;

		mkey = container_of(pos->key, struct mac802154_llsec_key, key);

		if (llsec_key_id_equal(&pos->id, key)) {
			list_del_rcu(&pos->list);
			llsec_key_put(mkey);
			return 0;
		}
	}

	return -ENOENT;
}

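/* Devices are kept in two hash tables: one keyed by PAN id + short address
 * (only when the device actually has a usable short address) and one keyed
 * by the extended (hardware) address.
 */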
static bool llsec_dev_use_shortaddr(__le16 short_addr)
{
	return short_addr != cpu_to_le16(IEEE802154_ADDR_UNDEF) &&
		short_addr != cpu_to_le16(0xffff);
}

static u32 llsec_dev_hash_short(__le16 short_addr, __le16 pan_id)
{
	return ((__force u16)short_addr) << 16 | (__force u16)pan_id;
}

static u64 llsec_dev_hash_long(__le64 hwaddr)
{
	return (__force u64)hwaddr;
}

static struct mac802154_llsec_device*
llsec_dev_find_short(struct mac802154_llsec *sec, __le16 short_addr,
		     __le16 pan_id)
{
	struct mac802154_llsec_device *dev;
	u32 key = llsec_dev_hash_short(short_addr, pan_id);

	hash_for_each_possible_rcu(sec->devices_short, dev, bucket_s, key) {
		if (dev->dev.short_addr == short_addr &&
		    dev->dev.pan_id == pan_id)
			return dev;
	}

	return NULL;
}

static struct mac802154_llsec_device*
llsec_dev_find_long(struct mac802154_llsec *sec, __le64 hwaddr)
{
	struct mac802154_llsec_device *dev;
	u64 key = llsec_dev_hash_long(hwaddr);

	hash_for_each_possible_rcu(sec->devices_hw, dev, bucket_hw, key) {
		if (dev->dev.hwaddr == hwaddr)
			return dev;
	}

	return NULL;
}

static void llsec_dev_free(struct mac802154_llsec_device *dev)
{
	struct ieee802154_llsec_device_key *pos, *pn;
	struct mac802154_llsec_device_key *devkey;

	list_for_each_entry_safe(pos, pn, &dev->dev.keys, list) {
		devkey = container_of(pos, struct mac802154_llsec_device_key,
				      devkey);

		list_del(&pos->list);
		kfree_sensitive(devkey);
	}

	kfree_sensitive(dev);
}

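/* Add a device entry to both lookup tables. A device without a usable short
 * address is only hashed by its extended address; its short-address bucket
 * node is merely initialised so that a later hash_del_rcu() stays safe.
 */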
int mac802154_llsec_dev_add(struct mac802154_llsec *sec,
			    const struct ieee802154_llsec_device *dev)
{
	struct mac802154_llsec_device *entry;
	u32 skey = llsec_dev_hash_short(dev->short_addr, dev->pan_id);
	u64 hwkey = llsec_dev_hash_long(dev->hwaddr);

	BUILD_BUG_ON(sizeof(hwkey) != IEEE802154_ADDR_LEN);

	if ((llsec_dev_use_shortaddr(dev->short_addr) &&
	     llsec_dev_find_short(sec, dev->short_addr, dev->pan_id)) ||
	     llsec_dev_find_long(sec, dev->hwaddr))
		return -EEXIST;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->dev = *dev;
	spin_lock_init(&entry->lock);
	INIT_LIST_HEAD(&entry->dev.keys);

	if (llsec_dev_use_shortaddr(dev->short_addr))
		hash_add_rcu(sec->devices_short, &entry->bucket_s, skey);
	else
		INIT_HLIST_NODE(&entry->bucket_s);

	hash_add_rcu(sec->devices_hw, &entry->bucket_hw, hwkey);
	list_add_tail_rcu(&entry->dev.list, &sec->table.devices);

	return 0;
}

static void llsec_dev_free_rcu(struct rcu_head *rcu)
{
	llsec_dev_free(container_of(rcu, struct mac802154_llsec_device, rcu));
}

int mac802154_llsec_dev_del(struct mac802154_llsec *sec, __le64 device_addr)
{
	struct mac802154_llsec_device *pos;

	pos = llsec_dev_find_long(sec, device_addr);
	if (!pos)
		return -ENOENT;

	hash_del_rcu(&pos->bucket_s);
	hash_del_rcu(&pos->bucket_hw);
	list_del_rcu(&pos->dev.list);
	call_rcu(&pos->rcu, llsec_dev_free_rcu);

	return 0;
}

static struct mac802154_llsec_device_key*
llsec_devkey_find(struct mac802154_llsec_device *dev,
		  const struct ieee802154_llsec_key_id *key)
{
	struct ieee802154_llsec_device_key *devkey;

	list_for_each_entry_rcu(devkey, &dev->dev.keys, list) {
		if (!llsec_key_id_equal(key, &devkey->key_id))
			continue;

		return container_of(devkey, struct mac802154_llsec_device_key,
				    devkey);
	}

	return NULL;
}

int mac802154_llsec_devkey_add(struct mac802154_llsec *sec,
			       __le64 dev_addr,
			       const struct ieee802154_llsec_device_key *key)
{
	struct mac802154_llsec_device *dev;
	struct mac802154_llsec_device_key *devkey;

	dev = llsec_dev_find_long(sec, dev_addr);

	if (!dev)
		return -ENOENT;

	if (llsec_devkey_find(dev, &key->key_id))
		return -EEXIST;

	devkey = kmalloc(sizeof(*devkey), GFP_KERNEL);
	if (!devkey)
		return -ENOMEM;

	devkey->devkey = *key;
	list_add_tail_rcu(&devkey->devkey.list, &dev->dev.keys);
	return 0;
}

int mac802154_llsec_devkey_del(struct mac802154_llsec *sec,
			       __le64 dev_addr,
			       const struct ieee802154_llsec_device_key *key)
{
	struct mac802154_llsec_device *dev;
	struct mac802154_llsec_device_key *devkey;

	dev = llsec_dev_find_long(sec, dev_addr);

	if (!dev)
		return -ENOENT;

	devkey = llsec_devkey_find(dev, &key->key_id);
	if (!devkey)
		return -ENOENT;

	list_del_rcu(&devkey->devkey.list);
	kfree_rcu(devkey, rcu);
	return 0;
}

static struct mac802154_llsec_seclevel*
llsec_find_seclevel(const struct mac802154_llsec *sec,
		    const struct ieee802154_llsec_seclevel *sl)
{
	struct ieee802154_llsec_seclevel *pos;

	list_for_each_entry(pos, &sec->table.security_levels, list) {
		if (pos->frame_type != sl->frame_type ||
		    (pos->frame_type == IEEE802154_FC_TYPE_MAC_CMD &&
		     pos->cmd_frame_id != sl->cmd_frame_id) ||
		    pos->device_override != sl->device_override ||
		    pos->sec_levels != sl->sec_levels)
			continue;

		return container_of(pos, struct mac802154_llsec_seclevel,
				    level);
	}

	return NULL;
}

int mac802154_llsec_seclevel_add(struct mac802154_llsec *sec,
				 const struct ieee802154_llsec_seclevel *sl)
{
	struct mac802154_llsec_seclevel *entry;

	if (llsec_find_seclevel(sec, sl))
		return -EEXIST;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->level = *sl;

	list_add_tail_rcu(&entry->level.list, &sec->table.security_levels);

	return 0;
}

int mac802154_llsec_seclevel_del(struct mac802154_llsec *sec,
				 const struct ieee802154_llsec_seclevel *sl)
{
	struct mac802154_llsec_seclevel *pos;

	pos = llsec_find_seclevel(sec, sl);
	if (!pos)
		return -ENOENT;

	list_del_rcu(&pos->level.list);
	kfree_rcu(pos, rcu);

	return 0;
}

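/* For frames that carry no source address, fall back to the PAN
 * coordinator's address from the PIB: use the coordinator short address if
 * one is set, the coordinator extended address otherwise, and give up when
 * the short address is the broadcast address.
 */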
static int llsec_recover_addr(struct mac802154_llsec *sec,
			      struct ieee802154_addr *addr)
{
	__le16 caddr = sec->params.coord_shortaddr;

	addr->pan_id = sec->params.pan_id;

	if (caddr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) {
		return -EINVAL;
	} else if (caddr == cpu_to_le16(IEEE802154_ADDR_UNDEF)) {
		addr->extended_addr = sec->params.coord_hwaddr;
		addr->mode = IEEE802154_ADDR_LONG;
	} else {
		addr->short_addr = sec->params.coord_shortaddr;
		addr->mode = IEEE802154_ADDR_SHORT;
	}

	return 0;
}

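/* Find the key matching a frame's auxiliary security header. Implicit keys
 * are matched against the (possibly recovered) device address; indexed keys
 * are matched on the key id plus, depending on the key id mode, the short
 * or extended key source.
 */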
static struct mac802154_llsec_key*
llsec_lookup_key(struct mac802154_llsec *sec,
		 const struct ieee802154_hdr *hdr,
		 const struct ieee802154_addr *addr,
		 struct ieee802154_llsec_key_id *key_id)
{
	struct ieee802154_addr devaddr = *addr;
	u8 key_id_mode = hdr->sec.key_id_mode;
	struct ieee802154_llsec_key_entry *key_entry;
	struct mac802154_llsec_key *key;

	if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT &&
	    devaddr.mode == IEEE802154_ADDR_NONE) {
		if (hdr->fc.type == IEEE802154_FC_TYPE_BEACON) {
			devaddr.extended_addr = sec->params.coord_hwaddr;
			devaddr.mode = IEEE802154_ADDR_LONG;
		} else if (llsec_recover_addr(sec, &devaddr) < 0) {
			return NULL;
		}
	}

	list_for_each_entry_rcu(key_entry, &sec->table.keys, list) {
		const struct ieee802154_llsec_key_id *id = &key_entry->id;

		if (!(key_entry->key->frame_types & BIT(hdr->fc.type)))
			continue;

		if (id->mode != key_id_mode)
			continue;

		if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT) {
			if (ieee802154_addr_equal(&devaddr, &id->device_addr))
				goto found;
		} else {
			if (id->id != hdr->sec.key_id)
				continue;

			if ((key_id_mode == IEEE802154_SCF_KEY_INDEX) ||
			    (key_id_mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
			     id->short_source == hdr->sec.short_src) ||
			    (key_id_mode == IEEE802154_SCF_KEY_HW_INDEX &&
			     id->extended_source == hdr->sec.extended_src))
				goto found;
		}
	}

	return NULL;

found:
	key = container_of(key_entry->key, struct mac802154_llsec_key, key);
	if (key_id)
		*key_id = key_entry->id;
	return llsec_key_get(key);
}

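/* Build the 16-byte counter block used as CCM/CTR IV: the flags byte
 * (L' = 1), the 8-byte source extended address and the 4-byte frame counter
 * (both big endian), the security level, and a two-byte block counter. The
 * middle 13 bytes correspond to the 802.15.4 CCM* nonce.
 */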
static void llsec_geniv(u8 iv[16], __le64 addr,
			const struct ieee802154_sechdr *sec)
{
	__be64 addr_bytes = (__force __be64) swab64((__force u64) addr);
	__be32 frame_counter = (__force __be32) swab32((__force u32) sec->frame_counter);

	iv[0] = 1; /* L' = L - 1 = 1 */
	memcpy(iv + 1, &addr_bytes, sizeof(addr_bytes));
	memcpy(iv + 9, &frame_counter, sizeof(frame_counter));
	iv[13] = sec->level;
	iv[14] = 0;
	iv[15] = 1;
}

static int
llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
			const struct ieee802154_hdr *hdr,
			struct mac802154_llsec_key *key)
{
	u8 iv[16];
	struct scatterlist src;
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
	int err, datalen;
	unsigned char *data;

	llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);
	/* Compute data payload offset and data length */
	data = skb_mac_header(skb) + skb->mac_len;
	datalen = skb_tail_pointer(skb) - data;
	sg_init_one(&src, data, datalen);

	skcipher_request_set_sync_tfm(req, key->tfm0);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, &src, &src, datalen, iv);
	err = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
	return err;
}

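/* Pick the AEAD transform whose configured authentication tag length
 * matches the auth tag size implied by the frame's security level.
 */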
static struct crypto_aead*
llsec_tfm_by_len(struct mac802154_llsec_key *key, int authlen)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
		if (crypto_aead_authsize(key->tfm[i]) == authlen)
			return key->tfm[i];

	BUG();
}

static int
llsec_do_encrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
		      const struct ieee802154_hdr *hdr,
		      struct mac802154_llsec_key *key)
{
	u8 iv[16];
	unsigned char *data;
	int authlen, assoclen, datalen, rc;
	struct scatterlist sg;
	struct aead_request *req;

	authlen = ieee802154_sechdr_authtag_len(&hdr->sec);
	llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);

	req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	assoclen = skb->mac_len;

	data = skb_mac_header(skb) + skb->mac_len;
	datalen = skb_tail_pointer(skb) - data;

	skb_put(skb, authlen);

	sg_init_one(&sg, skb_mac_header(skb), assoclen + datalen + authlen);

	if (!(hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC)) {
		assoclen += datalen;
		datalen = 0;
	}

	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_crypt(req, &sg, &sg, datalen, iv);
	aead_request_set_ad(req, assoclen);

	rc = crypto_aead_encrypt(req);

	kfree_sensitive(req);

	return rc;
}

static int llsec_do_encrypt(struct sk_buff *skb,
			    const struct mac802154_llsec *sec,
			    const struct ieee802154_hdr *hdr,
			    struct mac802154_llsec_key *key)
{
	if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC)
		return llsec_do_encrypt_unauth(skb, sec, hdr, key);
	else
		return llsec_do_encrypt_auth(skb, sec, hdr, key);
}

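/* Outgoing path: pull the MAC header, check that the secured frame still
 * fits into the MTU, look up the key by destination address, reserve the
 * next outgoing frame counter value under the write lock, then push the
 * header back and encrypt the payload in place.
 */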
int mac802154_llsec_encrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
{
	struct ieee802154_hdr hdr;
	int rc, authlen, hlen;
	struct mac802154_llsec_key *key;
	u32 frame_ctr;

	hlen = ieee802154_hdr_pull(skb, &hdr);

	if (hlen < 0 || hdr.fc.type != IEEE802154_FC_TYPE_DATA)
		return -EINVAL;

	if (!hdr.fc.security_enabled ||
	    (hdr.sec.level == IEEE802154_SCF_SECLEVEL_NONE)) {
		skb_push(skb, hlen);
		return 0;
	}

	authlen = ieee802154_sechdr_authtag_len(&hdr.sec);

	if (skb->len + hlen + authlen + IEEE802154_MFR_SIZE > IEEE802154_MTU)
		return -EMSGSIZE;

	rcu_read_lock();

	read_lock_bh(&sec->lock);

	if (!sec->params.enabled) {
		rc = -EINVAL;
		goto fail_read;
	}

	key = llsec_lookup_key(sec, &hdr, &hdr.dest, NULL);
	if (!key) {
		rc = -ENOKEY;
		goto fail_read;
	}

	read_unlock_bh(&sec->lock);

	write_lock_bh(&sec->lock);

	frame_ctr = be32_to_cpu(sec->params.frame_counter);
	hdr.sec.frame_counter = cpu_to_le32(frame_ctr);
	if (frame_ctr == 0xFFFFFFFF) {
		write_unlock_bh(&sec->lock);
		llsec_key_put(key);
		rc = -EOVERFLOW;
		goto fail;
	}

	sec->params.frame_counter = cpu_to_be32(frame_ctr + 1);

	write_unlock_bh(&sec->lock);

	rcu_read_unlock();

	skb->mac_len = ieee802154_hdr_push(skb, &hdr);
	skb_reset_mac_header(skb);

	rc = llsec_do_encrypt(skb, sec, &hdr, key);
	llsec_key_put(key);

	return rc;

fail_read:
	read_unlock_bh(&sec->lock);
fail:
	rcu_read_unlock();
	return rc;
}

static struct mac802154_llsec_device*
llsec_lookup_dev(struct mac802154_llsec *sec,
		 const struct ieee802154_addr *addr)
{
	struct ieee802154_addr devaddr = *addr;
	struct mac802154_llsec_device *dev = NULL;

	if (devaddr.mode == IEEE802154_ADDR_NONE &&
	    llsec_recover_addr(sec, &devaddr) < 0)
		return NULL;

	if (devaddr.mode == IEEE802154_ADDR_SHORT) {
		u32 key = llsec_dev_hash_short(devaddr.short_addr,
					       devaddr.pan_id);

		hash_for_each_possible_rcu(sec->devices_short, dev,
					   bucket_s, key) {
			if (dev->dev.pan_id == devaddr.pan_id &&
			    dev->dev.short_addr == devaddr.short_addr)
				return dev;
		}
	} else {
		u64 key = llsec_dev_hash_long(devaddr.extended_addr);

		hash_for_each_possible_rcu(sec->devices_hw, dev,
					   bucket_hw, key) {
			if (dev->dev.hwaddr == devaddr.extended_addr)
				return dev;
		}
	}

	return NULL;
}

static int
llsec_lookup_seclevel(const struct mac802154_llsec *sec,
		      u8 frame_type, u8 cmd_frame_id,
		      struct ieee802154_llsec_seclevel *rlevel)
{
	struct ieee802154_llsec_seclevel *level;

	list_for_each_entry_rcu(level, &sec->table.security_levels, list) {
		if (level->frame_type == frame_type &&
		    (frame_type != IEEE802154_FC_TYPE_MAC_CMD ||
		     level->cmd_frame_id == cmd_frame_id)) {
			*rlevel = *level;
			return 0;
		}
	}

	return -EINVAL;
}

static int
llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
			const struct ieee802154_hdr *hdr,
			struct mac802154_llsec_key *key, __le64 dev_addr)
{
	u8 iv[16];
	unsigned char *data;
	int datalen;
	struct scatterlist src;
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
	int err;

	llsec_geniv(iv, dev_addr, &hdr->sec);
	data = skb_mac_header(skb) + skb->mac_len;
	datalen = skb_tail_pointer(skb) - data;

	sg_init_one(&src, data, datalen);

	skcipher_request_set_sync_tfm(req, key->tfm0);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, &src, &src, datalen, iv);

	err = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);
	return err;
}

static int
llsec_do_decrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
		      const struct ieee802154_hdr *hdr,
		      struct mac802154_llsec_key *key, __le64 dev_addr)
{
	u8 iv[16];
	unsigned char *data;
	int authlen, datalen, assoclen, rc;
	struct scatterlist sg;
	struct aead_request *req;

	authlen = ieee802154_sechdr_authtag_len(&hdr->sec);
	llsec_geniv(iv, dev_addr, &hdr->sec);

	req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	assoclen = skb->mac_len;

	data = skb_mac_header(skb) + skb->mac_len;
	datalen = skb_tail_pointer(skb) - data;

	sg_init_one(&sg, skb_mac_header(skb), assoclen + datalen);

	if (!(hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC)) {
		assoclen += datalen - authlen;
		datalen = authlen;
	}

	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_crypt(req, &sg, &sg, datalen, iv);
	aead_request_set_ad(req, assoclen);

	rc = crypto_aead_decrypt(req);

	kfree_sensitive(req);
	skb_trim(skb, skb->len - authlen);

	return rc;
}

static int
llsec_do_decrypt(struct sk_buff *skb, const struct mac802154_llsec *sec,
		 const struct ieee802154_hdr *hdr,
		 struct mac802154_llsec_key *key, __le64 dev_addr)
{
	if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC)
		return llsec_do_decrypt_unauth(skb, sec, hdr, key, dev_addr);
	else
		return llsec_do_decrypt_auth(skb, sec, hdr, key, dev_addr);
}

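/* In RECORD key mode, lazily create a per-device entry for a key the first
 * time it is seen; racing creators are resolved under the device lock.
 */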
static int
llsec_update_devkey_record(struct mac802154_llsec_device *dev,
			   const struct ieee802154_llsec_key_id *in_key)
{
	struct mac802154_llsec_device_key *devkey;

	devkey = llsec_devkey_find(dev, in_key);

	if (!devkey) {
		struct mac802154_llsec_device_key *next;

		next = kzalloc(sizeof(*devkey), GFP_ATOMIC);
		if (!next)
			return -ENOMEM;

		next->devkey.key_id = *in_key;

		spin_lock_bh(&dev->lock);

		devkey = llsec_devkey_find(dev, in_key);
		if (!devkey)
			list_add_rcu(&next->devkey.list, &dev->dev.keys);
		else
			kfree_sensitive(next);

		spin_unlock_bh(&dev->lock);
	}

	return 0;
}

static int
llsec_update_devkey_info(struct mac802154_llsec_device *dev,
			 const struct ieee802154_llsec_key_id *in_key,
			 u32 frame_counter)
{
	struct mac802154_llsec_device_key *devkey = NULL;

	if (dev->dev.key_mode == IEEE802154_LLSEC_DEVKEY_RESTRICT) {
		devkey = llsec_devkey_find(dev, in_key);
		if (!devkey)
			return -ENOENT;
	}

	if (dev->dev.key_mode == IEEE802154_LLSEC_DEVKEY_RECORD) {
		int rc = llsec_update_devkey_record(dev, in_key);

		if (rc < 0)
			return rc;
	}

	spin_lock_bh(&dev->lock);

	if ((!devkey && frame_counter < dev->dev.frame_counter) ||
	    (devkey && frame_counter < devkey->devkey.frame_counter)) {
		spin_unlock_bh(&dev->lock);
		return -EINVAL;
	}

	if (devkey)
		devkey->devkey.frame_counter = frame_counter + 1;
	else
		dev->dev.frame_counter = frame_counter + 1;

	spin_unlock_bh(&dev->lock);

	return 0;
}

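/* Incoming path: peek at the MAC header, look up the key, the originating
 * device and the expected security level, enforce replay protection via the
 * per-device (or per-device-key) frame counter, and finally decrypt (and,
 * for authenticated levels, verify) the payload in place.
 */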
int mac802154_llsec_decrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
{
	struct ieee802154_hdr hdr;
	struct mac802154_llsec_key *key;
	struct ieee802154_llsec_key_id key_id;
	struct mac802154_llsec_device *dev;
	struct ieee802154_llsec_seclevel seclevel;
	int err;
	__le64 dev_addr;
	u32 frame_ctr;

	if (ieee802154_hdr_peek(skb, &hdr) < 0)
		return -EINVAL;
	if (!hdr.fc.security_enabled)
		return 0;
	if (hdr.fc.version == 0)
		return -EINVAL;

	read_lock_bh(&sec->lock);
	if (!sec->params.enabled) {
		read_unlock_bh(&sec->lock);
		return -EINVAL;
	}
	read_unlock_bh(&sec->lock);

	rcu_read_lock();

	key = llsec_lookup_key(sec, &hdr, &hdr.source, &key_id);
	if (!key) {
		err = -ENOKEY;
		goto fail;
	}

	dev = llsec_lookup_dev(sec, &hdr.source);
	if (!dev) {
		err = -EINVAL;
		goto fail_dev;
	}

	if (llsec_lookup_seclevel(sec, hdr.fc.type, 0, &seclevel) < 0) {
		err = -EINVAL;
		goto fail_dev;
	}

	if (!(seclevel.sec_levels & BIT(hdr.sec.level)) &&
	    (hdr.sec.level == 0 && seclevel.device_override &&
	     !dev->dev.seclevel_exempt)) {
		err = -EINVAL;
		goto fail_dev;
	}

	frame_ctr = le32_to_cpu(hdr.sec.frame_counter);

	if (frame_ctr == 0xffffffff) {
		err = -EOVERFLOW;
		goto fail_dev;
	}

	err = llsec_update_devkey_info(dev, &key_id, frame_ctr);
	if (err)
		goto fail_dev;

	dev_addr = dev->dev.hwaddr;

	rcu_read_unlock();

	err = llsec_do_decrypt(skb, sec, &hdr, key, dev_addr);