// SPDX-License-Identifier: GPL-2.0
/*
 * Management Component Transport Protocol (MCTP)
 *
 * Copyright (c) 2021 Code Construct
 * Copyright (c) 2021 Google
 */
9 #include <linux/compat.h>
10 #include <linux/if_arp.h>
11 #include <linux/net.h>
12 #include <linux/mctp.h>
13 #include <linux/module.h>
14 #include <linux/socket.h>
17 #include <net/mctpdevice.h>
20 #define CREATE_TRACE_POINTS
21 #include <trace/events/mctp.h>
/* socket implementation */

/* Forward declaration: the timer callback is installed in mctp_sk_init()
 * but defined further down the file.
 */
static void mctp_sk_expire_keys(struct timer_list *timer);
/* Socket release handler for the proto_ops table: hands teardown to the
 * protocol's close hook (mctp_sk_close), which drops binds, keys and the
 * socket's final reference.
 * NOTE(review): this chunk appears truncated -- the sk NULL check and the
 * return statement are not visible here.
 */
static int mctp_release(struct socket *sock)
	struct sock *sk = sock->sk;

	/* performs the actual teardown via sk_common_release() */
	sk->sk_prot->close(sk, 0);
39 /* Generic sockaddr checks, padding checks only so far */
40 static bool mctp_sockaddr_is_ok(const struct sockaddr_mctp *addr)
42 return !addr->__smctp_pad0 && !addr->__smctp_pad1;
45 static bool mctp_sockaddr_ext_is_ok(const struct sockaddr_mctp_ext *addr)
47 return !addr->__smctp_pad0[0] &&
48 !addr->__smctp_pad0[1] &&
49 !addr->__smctp_pad0[2];
/* bind(2) handler: validate the sockaddr, then record the (network, EID,
 * message type) triple on the socket and hash it into the net's bind list
 * so inbound messages can be matched to it.
 * NOTE(review): chunk appears truncated -- lock/unlock of the bind state,
 * the rebind rejection, error returns and the final return are not
 * visible here.
 */
static int mctp_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
	struct sock *sk = sock->sk;
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct sockaddr_mctp *smctp;

	/* caller must supply at least a full sockaddr_mctp */
	if (addrlen < sizeof(*smctp))

	if (addr->sa_family != AF_MCTP)

	/* binding claims a message type: privileged operation */
	if (!capable(CAP_NET_BIND_SERVICE))

	/* it's a valid sockaddr for MCTP, cast and do protocol checks */
	smctp = (struct sockaddr_mctp *)addr;

	/* reserved padding must be zero */
	if (!mctp_sockaddr_is_ok(smctp))

	/* TODO: allow rebind */

	msk->bind_net = smctp->smctp_network;
	msk->bind_addr = smctp->smctp_addr.s_addr;
	msk->bind_type = smctp->smctp_type & 0x7f; /* ignore the IC bit */

	/* add to the bind lookup table (mctp_sk_hash) */
	rc = sk->sk_prot->hash(sk);
/* sendmsg(2) handler: validate the destination sockaddr and tag bits,
 * resolve a route (or use direct/extended addressing when enabled via
 * MCTP_OPT_ADDR_EXT), build an skb with the message type as the first
 * payload byte, and hand it to mctp_local_output() for transmission.
 * NOTE(review): chunk appears truncated -- error returns, rcu lock/unlock
 * around the device lookup, the skb free on error, cb initialisation and
 * the tail of the function are not visible here.
 */
static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
	DECLARE_SOCKADDR(struct sockaddr_mctp *, addr, msg->msg_name);
	int rc, addrlen = msg->msg_namelen;
	struct sock *sk = sock->sk;
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct mctp_skb_cb *cb;
	struct mctp_route *rt;
	struct sk_buff *skb = NULL;

	/* tag bits userspace is allowed to set; continuation truncated,
	 * presumably includes MCTP_TAG_PREALLOC -- TODO confirm
	 */
	const u8 tagbits = MCTP_TAG_MASK | MCTP_TAG_OWNER |

	if (addrlen < sizeof(struct sockaddr_mctp))

	if (addr->smctp_family != AF_MCTP)

	if (!mctp_sockaddr_is_ok(addr))

	/* no tag bits outside the allowed set */
	if (addr->smctp_tag & ~tagbits)

	/* can't preallocate a non-owned tag */
	if (addr->smctp_tag & MCTP_TAG_PREALLOC &&
	    !(addr->smctp_tag & MCTP_TAG_OWNER))

	/* TODO: connect()ed sockets */
	return -EDESTADDRREQ;

	/* raw network access is privileged */
	if (!capable(CAP_NET_RAW))

	/* MCTP_NET_ANY resolves to the netns default network */
	if (addr->smctp_network == MCTP_NET_ANY)
		addr->smctp_network = mctp_default_net(sock_net(sk));

	/* direct addressing */
	if (msk->addr_ext && addrlen >= sizeof(struct sockaddr_mctp_ext)) {
		DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
				 extaddr, msg->msg_name);
		struct net_device *dev;

		dev = dev_get_by_index_rcu(sock_net(sk), extaddr->smctp_ifindex);
		/* check for correct halen */
		if (dev && extaddr->smctp_halen == dev->addr_len) {
			/* headroom for link-layer header + MCTP header */
			hlen = LL_RESERVED_SPACE(dev) + sizeof(struct mctp_hdr);

	/* routed addressing: look up by (network, destination EID) */
	rt = mctp_route_lookup(sock_net(sk), addr->smctp_network,
			       addr->smctp_addr.s_addr);

	hlen = LL_RESERVED_SPACE(rt->dev->dev) + sizeof(struct mctp_hdr);

	/* +1 for the message-type byte prepended to the payload */
	skb = sock_alloc_send_skb(sk, hlen + 1 + len,
				  msg->msg_flags & MSG_DONTWAIT, &rc);

	skb_reserve(skb, hlen);

	/* set type as first byte in payload */
	*(u8 *)skb_put(skb, 1) = addr->smctp_type;

	rc = memcpy_from_msg((void *)skb_put(skb, len), msg, len);

	cb->net = addr->smctp_network;

	/* fill extended address in cb */
	DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
			 extaddr, msg->msg_name);

	/* hardware address must fit the cb's fixed-size buffer */
	if (!mctp_sockaddr_ext_is_ok(extaddr) ||
	    extaddr->smctp_halen > sizeof(cb->haddr)) {

	cb->ifindex = extaddr->smctp_ifindex;
	/* smctp_halen is checked above */
	cb->halen = extaddr->smctp_halen;
	memcpy(cb->haddr, extaddr->smctp_haddr, cb->halen);

	rc = mctp_local_output(sk, rt, skb, addr->smctp_addr.s_addr,
/* recvmsg(2) handler: dequeue one datagram, strip the leading message-type
 * byte, copy the payload to userspace, and (if requested) fill in the
 * source sockaddr -- extended with interface/hardware address when the
 * socket has MCTP_OPT_ADDR_EXT enabled.
 * NOTE(review): chunk appears truncated -- flag/length error returns, the
 * msg_name presence check, the addr_ext conditional and the final return
 * value handling are not visible here.
 */
static int mctp_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
	DECLARE_SOCKADDR(struct sockaddr_mctp *, addr, msg->msg_name);
	struct sock *sk = sock->sk;
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);

	/* only these recv flags are supported */
	if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK))

	skb = skb_recv_datagram(sk, flags, &rc);

	/* extract message type, remove from data */
	type = *((u8 *)skb->data);
	msglen = skb->len - 1;

	/* userspace buffer smaller than the message */
	msg->msg_flags |= MSG_TRUNC;

	/* copy payload, skipping the type byte at offset 1 */
	rc = skb_copy_datagram_msg(skb, 1, msg, len);

	sock_recv_cmsgs(msg, sk, skb);

	struct mctp_skb_cb *cb = mctp_cb(skb);
	/* TODO: expand mctp_skb_cb for header fields? */
	struct mctp_hdr *hdr = mctp_hdr(skb);

	/* populate the source address for the caller */
	addr = msg->msg_name;
	addr->smctp_family = AF_MCTP;
	addr->__smctp_pad0 = 0;
	addr->smctp_network = cb->net;
	addr->smctp_addr.s_addr = hdr->src;
	addr->smctp_type = type;
	addr->smctp_tag = hdr->flags_seq_tag &
		(MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);
	addr->__smctp_pad1 = 0;
	msg->msg_namelen = sizeof(*addr);

	/* extended addressing: also report ifindex + hardware address */
	DECLARE_SOCKADDR(struct sockaddr_mctp_ext *, ae,
	msg->msg_namelen = sizeof(*ae);
	ae->smctp_ifindex = cb->ifindex;
	ae->smctp_halen = cb->halen;
	memset(ae->__smctp_pad0, 0x0, sizeof(ae->__smctp_pad0));
	/* zero-fill so short hardware addresses don't leak stack/skb data */
	memset(ae->smctp_haddr, 0x0, sizeof(ae->smctp_haddr));
	memcpy(ae->smctp_haddr, cb->haddr, cb->halen);

	/* MSG_TRUNC: report the full message length, not the copied length */
	if (flags & MSG_TRUNC)

	skb_free_datagram(sk, skb);
/* We're done with the key; invalidate, stop reassembly, and remove from
 * lists. Called with key->lock held (released here, per __releases) and
 * net->mctp.keys_lock held by the caller.
 * NOTE(review): chunk appears truncated -- the skb free of the detached
 * reasm_head and the unref/key put after list removal are not visible
 * here.
 */
static void __mctp_key_remove(struct mctp_sk_key *key, struct net *net,
			      unsigned long flags, unsigned long reason)
	__releases(&key->lock)
	__must_hold(&net->mctp.keys_lock)

	trace_mctp_key_release(key, reason);
	/* detach any partial reassembly and mark the key dead so in-flight
	 * users stop appending to it
	 */
	skb = key->reasm_head;
	key->reasm_head = NULL;
	key->reasm_dead = true;

	/* release the device's tag reference */
	mctp_dev_release_key(key->dev, key);
	spin_unlock_irqrestore(&key->lock, flags);

	/* unhash from both the net-wide and per-socket key lists */
	if (!hlist_unhashed(&key->hlist)) {
		hlist_del_init(&key->hlist);
		hlist_del_init(&key->sklist);
		/* unref for the lists */
/* setsockopt(2) handler. Only SOL_MCTP/MCTP_OPT_ADDR_EXT is supported:
 * an int flag enabling extended (ifindex + hardware) addressing on
 * sendmsg/recvmsg.
 * NOTE(review): chunk appears truncated -- error returns, the msk->addr_ext
 * assignment and the final returns are not visible here.
 */
static int mctp_setsockopt(struct socket *sock, int level, int optname,
			   sockptr_t optval, unsigned int optlen)
	struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);

	if (level != SOL_MCTP)

	if (optname == MCTP_OPT_ADDR_EXT) {
		/* option value is a plain int flag */
		if (optlen != sizeof(int))
		if (copy_from_sockptr(&val, optval, sizeof(int)))
/* getsockopt(2) handler, mirror of mctp_setsockopt: reports the
 * MCTP_OPT_ADDR_EXT flag as an int.
 * NOTE(review): chunk appears truncated -- error returns and the final
 * returns are not visible here.
 */
static int mctp_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
	struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);

	if (level != SOL_MCTP)

	if (get_user(len, optlen))

	if (optname == MCTP_OPT_ADDR_EXT) {
		if (len != sizeof(int))
		/* normalise to 0/1 */
		val = !!msk->addr_ext;
		if (copy_to_user(optval, &val, len))
/* SIOCMCTPALLOCTAG: preallocate a local tag for the given peer, so
 * userspace can use it across multiple sendmsg() calls. The resulting tag
 * (with OWNER and PREALLOC set) is copied back to the user's
 * mctp_ioc_tag_ctl.
 * NOTE(review): chunk appears truncated -- validation of ctl fields, the
 * key error check, and the success/error returns are not visible here.
 */
static int mctp_ioctl_alloctag(struct mctp_sock *msk, unsigned long arg)
	struct net *net = sock_net(&msk->sk);
	struct mctp_sk_key *key = NULL;
	struct mctp_ioc_tag_ctl ctl;

	if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl)))

	/* allocate a tag we own, valid for any local EID */
	key = mctp_alloc_local_tag(msk, ctl.peer_addr, MCTP_ADDR_ANY,

	ctl.tag = tag | MCTP_TAG_OWNER | MCTP_TAG_PREALLOC;
	if (copy_to_user((void __user *)arg, &ctl, sizeof(ctl))) {

		/* Unwind our key allocation: the keys list lock needs to be
		 * taken before the individual key locks, and we need a valid
		 * flags value (fl2) to pass to __mctp_key_remove, hence the
		 * second spin_lock_irqsave() rather than a plain spin_lock().
		 */
		spin_lock_irqsave(&net->mctp.keys_lock, flags);
		spin_lock_irqsave(&key->lock, fl2);
		/* releases key->lock */
		__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_DROPPED);
		spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
/* SIOCMCTPDROPTAG: release a previously-preallocated tag. Walks this
 * socket's key list and removes any manually-allocated key matching the
 * requested peer and tag.
 * NOTE(review): chunk appears truncated -- ctl validation error returns,
 * the tag comparison in the match condition, and the found/return handling
 * are not visible here.
 */
static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg)
	struct net *net = sock_net(&msk->sk);
	struct mctp_ioc_tag_ctl ctl;
	unsigned long flags, fl2;
	struct mctp_sk_key *key;
	struct hlist_node *tmp;

	if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl)))

	/* Must be a local tag, TO set, preallocated */
	if ((ctl.tag & ~MCTP_TAG_MASK) != (MCTP_TAG_OWNER | MCTP_TAG_PREALLOC))

	tag = ctl.tag & MCTP_TAG_MASK;

	spin_lock_irqsave(&net->mctp.keys_lock, flags);
	hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
		/* we do an irqsave here, even though we know the irq state,
		 * so we have the flags to pass to __mctp_key_remove
		 */
		spin_lock_irqsave(&key->lock, fl2);
		if (key->manual_alloc &&
		    ctl.peer_addr == key->peer_addr &&
			/* releases key->lock */
			__mctp_key_remove(key, net, fl2,
					  MCTP_TRACE_KEY_DROPPED);
			spin_unlock_irqrestore(&key->lock, fl2);

	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
/* ioctl(2) dispatcher: routes the MCTP tag-control ioctls to their
 * handlers.
 * NOTE(review): chunk appears truncated -- the switch statement braces and
 * the default (-EINVAL) case are not visible here.
 */
static int mctp_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
	struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);

	case SIOCMCTPALLOCTAG:
		return mctp_ioctl_alloctag(msk, arg);
	case SIOCMCTPDROPTAG:
		return mctp_ioctl_droptag(msk, arg);
/* compat (32-bit userspace) ioctl handler: the tag-control structures have
 * identical layout in compat mode, so just translate the pointer and
 * forward to the native handler.
 * NOTE(review): chunk appears truncated -- the switch braces and default
 * case are not visible here.
 */
static int mctp_compat_ioctl(struct socket *sock, unsigned int cmd,
	void __user *argp = compat_ptr(arg);

	/* These have compatible ptr layouts */
	case SIOCMCTPALLOCTAG:
	case SIOCMCTPDROPTAG:
		return mctp_ioctl(sock, cmd, (unsigned long)argp);
/* proto_ops for AF_MCTP datagram sockets. MCTP is connectionless, so the
 * connection-oriented operations are all sock_no_* stubs.
 * NOTE(review): chunk appears truncated -- .family, .bind and .ioctl
 * entries are not visible here.
 */
static const struct proto_ops mctp_dgram_ops = {
	.release = mctp_release,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.gettstamp = sock_gettstamp,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = mctp_setsockopt,
	.getsockopt = mctp_getsockopt,
	.sendmsg = mctp_sendmsg,
	.recvmsg = mctp_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
	/* 32-bit userspace ioctl translation */
	.compat_ioctl = mctp_compat_ioctl,
/* Timer callback: walk this socket's key list, remove keys whose expiry
 * has passed, and re-arm the timer for the earliest remaining expiry (if
 * any).
 * NOTE(review): chunk appears truncated -- the container_of continuation
 * line, loop braces, the continue after removal, and the else branch are
 * not visible here.
 */
static void mctp_sk_expire_keys(struct timer_list *timer)
	struct mctp_sock *msk = container_of(timer, struct mctp_sock,
	struct net *net = sock_net(&msk->sk);
	unsigned long next_expiry, flags, fl2;
	struct mctp_sk_key *key;
	struct hlist_node *tmp;
	bool next_expiry_valid = false;

	spin_lock_irqsave(&net->mctp.keys_lock, flags);

	hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
		/* don't expire. manual_alloc is immutable, no locking
		 * needed to read it here.
		 */
		if (key->manual_alloc)

		spin_lock_irqsave(&key->lock, fl2);
		/* expired: remove (releases key->lock) */
		if (!time_after_eq(key->expiry, jiffies)) {
			__mctp_key_remove(key, net, fl2,
					  MCTP_TRACE_KEY_TIMEOUT);

		/* still live: track the earliest upcoming expiry */
		if (next_expiry_valid) {
			if (time_before(key->expiry, next_expiry))
				next_expiry = key->expiry;
			next_expiry = key->expiry;
			next_expiry_valid = true;
		spin_unlock_irqrestore(&key->lock, fl2);

	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	/* re-arm only if something is still pending */
	if (next_expiry_valid)
		mod_timer(timer, next_expiry);
/* proto init hook: set up the per-socket key list and the key-expiry
 * timer (armed later, when keys are allocated).
 * NOTE(review): chunk appears truncated -- the return statement is not
 * visible here.
 */
static int mctp_sk_init(struct sock *sk)
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);

	INIT_HLIST_HEAD(&msk->keys);
	timer_setup(&msk->key_expiry, mctp_sk_expire_keys, 0);
/* proto close hook: generic socket release does all the work (unhash,
 * queue purge via destructor, final put). The timeout argument is unused
 * for MCTP datagram sockets.
 */
static void mctp_sk_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}
/* proto hash hook: add the socket to the per-netns bind list, under the
 * bind mutex, so incoming messages can be matched against its bind.
 * NOTE(review): chunk appears truncated -- the return statement is not
 * visible here.
 */
static int mctp_sk_hash(struct sock *sk)
	struct net *net = sock_net(sk);

	mutex_lock(&net->mctp.bind_lock);
	/* RCU insertion: readers walk binds without the mutex */
	sk_add_node_rcu(sk, &net->mctp.binds);
	mutex_unlock(&net->mctp.bind_lock);
/* proto unhash hook: remove the socket from the bind list, drop all of
 * its tag allocations (keys), then quiesce the expiry timer.
 * NOTE(review): chunk appears truncated -- the per-key spin_lock pairing
 * and loop braces are not fully visible here.
 */
static void mctp_sk_unhash(struct sock *sk)
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct net *net = sock_net(sk);
	unsigned long flags, fl2;
	struct mctp_sk_key *key;
	struct hlist_node *tmp;

	/* remove from any type-based binds */
	mutex_lock(&net->mctp.bind_lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->mctp.bind_lock);

	/* remove tag allocations */
	spin_lock_irqsave(&net->mctp.keys_lock, flags);
	hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
		spin_lock_irqsave(&key->lock, fl2);
		/* releases key->lock */
		__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_CLOSED);

	sock_set_flag(sk, SOCK_DEAD);
	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	/* Since there are no more tag allocations (we have removed all of the
	 * keys), stop any pending expiry events. the timer cannot be re-queued
	 * as the sk is no longer observable
	 */
	del_timer_sync(&msk->key_expiry);
590 static void mctp_sk_destruct(struct sock *sk)
592 skb_queue_purge(&sk->sk_receive_queue);
/* Protocol definition: per-socket size and the lifecycle hooks invoked via
 * sk->sk_prot by the proto_ops layer above.
 * NOTE(review): chunk appears truncated -- the .name entry and closing
 * brace are not visible here.
 */
static struct proto mctp_proto = {
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct mctp_sock),
	.init = mctp_sk_init,
	.close = mctp_sk_close,
	.hash = mctp_sk_hash,
	.unhash = mctp_sk_unhash,
/* Protocol-family create handler: reject unsupported protocol/type
 * combinations, allocate the sock, and run the proto init hook.
 * NOTE(review): chunk appears truncated -- the protocol check preceding
 * -EPROTONOSUPPORT, sk_alloc failure handling, and the error/return tail
 * are not visible here.
 */
static int mctp_pf_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
	const struct proto_ops *ops;

	return -EPROTONOSUPPORT;

	/* only datagram sockets are supported */
	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	ops = &mctp_dgram_ops;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_MCTP, GFP_KERNEL, proto, kern);

	sock_init_data(sock, sk);
	sk->sk_destruct = mctp_sk_destruct;

	/* run the proto init hook (mctp_sk_init) if present */
	if (sk->sk_prot->init)
		rc = sk->sk_prot->init(sk);
/* Registration record for the AF_MCTP protocol family.
 * NOTE(review): chunk appears truncated -- the .family entry and closing
 * brace are not visible here.
 */
static struct net_proto_family mctp_pf = {
	.create = mctp_pf_create,
	.owner = THIS_MODULE,
/* Module init: register the socket family and proto, then bring up the
 * routing and neighbour subsystems, unwinding in reverse order on failure.
 * NOTE(review): chunk appears truncated -- the rc checks after
 * sock_register/proto_register, the success return, and the error-label
 * names are not fully visible here.
 */
static __init int mctp_init(void)

	/* ensure our uapi tag definitions match the header format */
	BUILD_BUG_ON(MCTP_TAG_OWNER != MCTP_HDR_FLAG_TO);
	BUILD_BUG_ON(MCTP_TAG_MASK != MCTP_HDR_TAG_MASK);

	pr_info("mctp: management component transport protocol core\n");

	rc = sock_register(&mctp_pf);

	rc = proto_register(&mctp_proto, 0);

	rc = mctp_routes_init();
		goto err_unreg_proto;

	rc = mctp_neigh_init();
		goto err_unreg_routes;

	/* error unwind: reverse order of registration */
	proto_unregister(&mctp_proto);

	sock_unregister(PF_MCTP);
/* Module exit: tear down in reverse order of mctp_init.
 * NOTE(review): chunk appears truncated -- the neighbour/routes exit calls
 * that precede these are not visible here.
 */
static __exit void mctp_exit(void)
	proto_unregister(&mctp_proto);
	sock_unregister(PF_MCTP);
/* subsys-level initcall: the MCTP core must be registered before device
 * drivers that depend on it initialise.
 */
subsys_initcall(mctp_init);
module_exit(mctp_exit);

MODULE_DESCRIPTION("MCTP core");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jeremy Kerr <jk@codeconstruct.com.au>");

/* autoload this module when an AF_MCTP socket is first requested */
MODULE_ALIAS_NETPROTO(PF_MCTP);