3 * DECnet An implementation of the DECnet protocol suite for the LINUX
4 * operating system. DECnet is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
7 * DECnet Socket Layer Interface
9 * Authors: Eduardo Marcelo Serrat <emserrat@geocities.com>
10 * Patrick Caulfield <patrick@pandh.demon.co.uk>
13 * Steve Whitehouse: Copied from Eduardo Serrat and Patrick Caulfield's
14 * version of the code. Original copyright preserved
16 * Steve Whitehouse: Some bug fixes, cleaning up some code to make it
17 * compatible with my routing layer.
18 * Steve Whitehouse: Merging changes from Eduardo Serrat and Patrick
20 * Steve Whitehouse: Further bug fixes, checking module code still works
21 * with new routing layer.
22 * Steve Whitehouse: Additional set/get_sockopt() calls.
23 * Steve Whitehouse: Fixed TIOCINQ ioctl to be same as Eduardo's new
25 * Steve Whitehouse: recvmsg() changed to try and behave in a POSIX-like
26 * way. Didn't manage it entirely, but it's better.
27 * Steve Whitehouse: ditto for sendmsg().
28 * Steve Whitehouse: A selection of bug fixes to various things.
29 * Steve Whitehouse: Added TIOCOUTQ ioctl.
30 * Steve Whitehouse: Fixes to username2sockaddr & sockaddr2username.
31 * Steve Whitehouse: Fixes to connect() error returns.
32 * Patrick Caulfield: Fixes to delayed acceptance logic.
33 * David S. Miller: New socket locking
34 * Steve Whitehouse: Socket list hashing/locking
35 * Arnaldo C. Melo: use capable, not suser
36 * Steve Whitehouse: Removed unused code. Fix to use sk->allocation
38 * Patrick Caulfield: /proc/net/decnet now has object name/number
39 * Steve Whitehouse: Fixed local port allocation, hashed sk list
40 * Matthew Wilcox: Fixes for dn_ioctl()
41 * Steve Whitehouse: New connect/accept logic to allow timeouts and
42 * prepare for sendpage etc.
46 /******************************************************************************
47 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
49 This program is free software; you can redistribute it and/or modify
50 it under the terms of the GNU General Public License as published by
51 the Free Software Foundation; either version 2 of the License, or
54 This program is distributed in the hope that it will be useful,
55 but WITHOUT ANY WARRANTY; without even the implied warranty of
56 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
57 GNU General Public License for more details.
61 Version Kernel Date Author/Comments
62 ------- ------ ---- ---------------
63 Version 0.0.1 2.0.30 01-dec-97 Eduardo Marcelo Serrat
64 (emserrat@geocities.com)
66 First Development of DECnet Socket Layer
67 for Linux. Only supports outgoing connections.
70 Version 0.0.2 2.1.105 20-jun-98 Patrick J. Caulfield
71 (patrick@pandh.demon.co.uk)
73 Port to new kernel development version.
75 Version 0.0.3 2.1.106 25-jun-98 Eduardo Marcelo Serrat
76 (emserrat@geocities.com)
78 Added support for incoming connections
79 so we can start developing server apps
83 Version 0.0.4 2.1.109 21-jul-98 Eduardo Marcelo Serrat
84 (emserrat@geocities.com)
86 Added support for X11R6.4. Now we can
87 use DECnet transport for X on Linux!!!
89 Version 0.0.5 2.1.110 01-aug-98 Eduardo Marcelo Serrat
90 (emserrat@geocities.com)
91 Removed bugs in flow control
92 Removed bugs in incoming accessdata
95 Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
100 *******************************************************************************/
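/*
 * A minimal user-level sketch of reaching this layer through the BSD
 * socket interface (illustrative only, not part of this module; the
 * <netdnet/dn.h> header name and object name "EXAMPLE" are assumptions,
 * and sdn_objnamel is shown without explicit byte-order conversion):
 *
 *	#include <sys/socket.h>
 *	#include <string.h>
 *	#include <netdnet/dn.h>
 *
 *	int s = socket(AF_DECnet, SOCK_SEQPACKET, DNPROTO_NSP);
 *	struct sockaddr_dn sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sdn_family = AF_DECnet;
 *	sa.sdn_objnamel = 7;
 *	memcpy(sa.sdn_objname, "EXAMPLE", 7);
 *	bind(s, (struct sockaddr *)&sa, sizeof(sa));
 *	listen(s, 5);
 */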
102 #include <linux/module.h>
103 #include <linux/errno.h>
104 #include <linux/types.h>
105 #include <linux/slab.h>
106 #include <linux/socket.h>
107 #include <linux/in.h>
108 #include <linux/kernel.h>
109 #include <linux/sched/signal.h>
110 #include <linux/timer.h>
111 #include <linux/string.h>
112 #include <linux/sockios.h>
113 #include <linux/net.h>
114 #include <linux/netdevice.h>
115 #include <linux/inet.h>
116 #include <linux/route.h>
117 #include <linux/netfilter.h>
118 #include <linux/seq_file.h>
119 #include <net/sock.h>
120 #include <net/tcp_states.h>
121 #include <net/flow.h>
122 #include <asm/ioctls.h>
123 #include <linux/capability.h>
124 #include <linux/mm.h>
125 #include <linux/interrupt.h>
126 #include <linux/proc_fs.h>
127 #include <linux/stat.h>
128 #include <linux/init.h>
129 #include <linux/poll.h>
130 #include <linux/jiffies.h>
131 #include <net/net_namespace.h>
132 #include <net/neighbour.h>
134 #include <net/fib_rules.h>
137 #include <net/dn_nsp.h>
138 #include <net/dn_dev.h>
139 #include <net/dn_route.h>
140 #include <net/dn_fib.h>
141 #include <net/dn_neigh.h>
148 static void dn_keepalive(struct sock *sk);
150 #define DN_SK_HASH_SHIFT 8
151 #define DN_SK_HASH_SIZE (1 << DN_SK_HASH_SHIFT)
152 #define DN_SK_HASH_MASK (DN_SK_HASH_SIZE - 1)
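/*
 * Bound sockets live in a table of DN_SK_HASH_SIZE (256) buckets keyed
 * on the low bits of their local port (scp->addrloc). Sockets listening
 * on an object name are rehashed onto a bucket derived from that name
 * instead (see listen_hash() below), and at most one wildcard listener
 * sits on its own list, dn_wild_sk.
 */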
155 static const struct proto_ops dn_proto_ops;
156 static DEFINE_RWLOCK(dn_hash_lock);
157 static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
158 static struct hlist_head dn_wild_sk;
159 static atomic_long_t decnet_memory_allocated;
161 static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags);
162 static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
164 static struct hlist_head *dn_find_list(struct sock *sk)
166 struct dn_scp *scp = DN_SK(sk);
168 if (scp->addr.sdn_flags & SDF_WILD)
169 return hlist_empty(&dn_wild_sk) ? &dn_wild_sk : NULL;
171 return &dn_sk_hash[le16_to_cpu(scp->addrloc) & DN_SK_HASH_MASK];
175 * Valid ports are those greater than zero and not already in use.
177 static int check_port(__le16 port)
184 sk_for_each(sk, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) {
185 struct dn_scp *scp = DN_SK(sk);
186 if (scp->addrloc == port)
192 static unsigned short port_alloc(struct sock *sk)
194 struct dn_scp *scp = DN_SK(sk);
195 static unsigned short port = 0x2000;
196 unsigned short i_port = port;
198 while (check_port(cpu_to_le16(++port)) != 0) {
203 scp->addrloc = cpu_to_le16(port);
209 * Since this is only ever called from user
210 * level, we don't need a write_lock() version of this.
213 static int dn_hash_sock(struct sock *sk)
215 struct dn_scp *scp = DN_SK(sk);
216 struct hlist_head *list;
219 BUG_ON(sk_hashed(sk));
221 write_lock_bh(&dn_hash_lock);
223 if (!scp->addrloc && !port_alloc(sk))
227 if ((list = dn_find_list(sk)) == NULL)
230 sk_add_node(sk, list);
233 write_unlock_bh(&dn_hash_lock);
237 static void dn_unhash_sock(struct sock *sk)
239 write_lock(&dn_hash_lock);
240 sk_del_node_init(sk);
241 write_unlock(&dn_hash_lock);
244 static void dn_unhash_sock_bh(struct sock *sk)
246 write_lock_bh(&dn_hash_lock);
247 sk_del_node_init(sk);
248 write_unlock_bh(&dn_hash_lock);
251 static struct hlist_head *listen_hash(struct sockaddr_dn *addr)
254 unsigned int hash = addr->sdn_objnum;
257 hash = addr->sdn_objnamel;
258 for (i = 0; i < le16_to_cpu(addr->sdn_objnamel); i++) {
259 hash ^= addr->sdn_objname[i];
264 return &dn_sk_hash[hash & DN_SK_HASH_MASK];
268 * Called to transform a socket from bound (i.e. with a local address)
269 * into a listening socket (doesn't need a local port number) and rehashes
270 * based upon the object name/number.
272 static void dn_rehash_sock(struct sock *sk)
274 struct hlist_head *list;
275 struct dn_scp *scp = DN_SK(sk);
277 if (scp->addr.sdn_flags & SDF_WILD)
280 write_lock_bh(&dn_hash_lock);
281 sk_del_node_init(sk);
282 DN_SK(sk)->addrloc = 0;
283 list = listen_hash(&DN_SK(sk)->addr);
284 sk_add_node(sk, list);
285 write_unlock_bh(&dn_hash_lock);
288 int dn_sockaddr2username(struct sockaddr_dn *sdn, unsigned char *buf, unsigned char type)
296 *buf++ = sdn->sdn_objnum;
300 *buf++ = le16_to_cpu(sdn->sdn_objnamel);
301 memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
302 len = 3 + le16_to_cpu(sdn->sdn_objnamel);
307 *buf++ = le16_to_cpu(sdn->sdn_objnamel);
308 memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
309 len = 7 + le16_to_cpu(sdn->sdn_objnamel);
317 * On reception of usernames, we handle types 1 and 0 for destination
318 * addresses only. Types 2 and 4 are used for source addresses, but the
319 * UIC, GIC are ignored and they are both treated the same way. Type 3
320 * is never used as I've no idea what its purpose might be or what its format is.
323 int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn, unsigned char *fmt)
330 sdn->sdn_objnamel = cpu_to_le16(0);
331 memset(sdn->sdn_objname, 0, DN_MAXOBJL);
342 sdn->sdn_objnum = type;
364 sdn->sdn_objnamel = cpu_to_le16(*data++);
365 len -= le16_to_cpu(sdn->sdn_objnamel);
367 if ((len < 0) || (le16_to_cpu(sdn->sdn_objnamel) > namel))
370 memcpy(sdn->sdn_objname, data, le16_to_cpu(sdn->sdn_objnamel));
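/*
 * For reference, the "username" formats handled by the two functions
 * above are: format 0 carries just an object number; format 1 carries a
 * zero object number followed by a counted object name; formats 2 and 4
 * carry a counted object name preceded by UIC/GIC fields which, as noted
 * above, are ignored on reception.
 */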
375 struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
377 struct hlist_head *list = listen_hash(addr);
380 read_lock(&dn_hash_lock);
381 sk_for_each(sk, list) {
382 struct dn_scp *scp = DN_SK(sk);
383 if (sk->sk_state != TCP_LISTEN)
385 if (scp->addr.sdn_objnum) {
386 if (scp->addr.sdn_objnum != addr->sdn_objnum)
389 if (addr->sdn_objnum)
391 if (scp->addr.sdn_objnamel != addr->sdn_objnamel)
393 if (memcmp(scp->addr.sdn_objname, addr->sdn_objname, le16_to_cpu(addr->sdn_objnamel)) != 0)
397 read_unlock(&dn_hash_lock);
401 sk = sk_head(&dn_wild_sk);
403 if (sk->sk_state == TCP_LISTEN)
409 read_unlock(&dn_hash_lock);
413 struct sock *dn_find_by_skb(struct sk_buff *skb)
415 struct dn_skb_cb *cb = DN_SKB_CB(skb);
419 read_lock(&dn_hash_lock);
420 sk_for_each(sk, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) {
422 if (cb->src != dn_saddr2dn(&scp->peer))
424 if (cb->dst_port != scp->addrloc)
426 if (scp->addrrem && (cb->src_port != scp->addrrem))
433 read_unlock(&dn_hash_lock);
439 static void dn_destruct(struct sock *sk)
441 struct dn_scp *scp = DN_SK(sk);
443 skb_queue_purge(&scp->data_xmit_queue);
444 skb_queue_purge(&scp->other_xmit_queue);
445 skb_queue_purge(&scp->other_receive_queue);
447 dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
450 static unsigned long dn_memory_pressure;
452 static void dn_enter_memory_pressure(struct sock *sk)
454 if (!dn_memory_pressure) {
455 dn_memory_pressure = 1;
459 static struct proto dn_proto = {
461 .owner = THIS_MODULE,
462 .enter_memory_pressure = dn_enter_memory_pressure,
463 .memory_pressure = &dn_memory_pressure,
464 .memory_allocated = &decnet_memory_allocated,
465 .sysctl_mem = sysctl_decnet_mem,
466 .sysctl_wmem = sysctl_decnet_wmem,
467 .sysctl_rmem = sysctl_decnet_rmem,
468 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
469 .obj_size = sizeof(struct dn_sock),
472 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp, int kern)
475 struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto, kern);
481 sock->ops = &dn_proto_ops;
482 sock_init_data(sock, sk);
484 sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
485 sk->sk_destruct = dn_destruct;
486 sk->sk_no_check_tx = 1;
487 sk->sk_family = PF_DECnet;
489 sk->sk_allocation = gfp;
490 sk->sk_sndbuf = sysctl_decnet_wmem[1];
491 sk->sk_rcvbuf = sysctl_decnet_rmem[1];
493 /* Initialization of DECnet Session Control Port */
495 scp->state = DN_O; /* Open */
496 scp->numdat = 1; /* Next data seg to tx */
497 scp->numoth = 1; /* Next oth data to tx */
498 scp->ackxmt_dat = 0; /* Last data seg ack'ed */
499 scp->ackxmt_oth = 0; /* Last oth data ack'ed */
500 scp->ackrcv_dat = 0; /* Highest data ack recv*/
501 scp->ackrcv_oth = 0; /* Last oth data ack rec*/
502 scp->flowrem_sw = DN_SEND;
503 scp->flowloc_sw = DN_SEND;
504 scp->flowrem_dat = 0;
505 scp->flowrem_oth = 1;
506 scp->flowloc_dat = 0;
507 scp->flowloc_oth = 1;
508 scp->services_rem = 0;
509 scp->services_loc = 1 | NSP_FC_NONE;
511 scp->info_loc = 0x03; /* NSP version 4.1 */
512 scp->segsize_rem = 230 - DN_MAX_NSP_DATA_HEADER; /* Default: Updated by remote segsize */
515 scp->accept_mode = ACC_IMMED;
516 scp->addr.sdn_family = AF_DECnet;
517 scp->peer.sdn_family = AF_DECnet;
518 scp->accessdata.acc_accl = 5;
519 memcpy(scp->accessdata.acc_acc, "LINUX", 5);
521 scp->max_window = NSP_MAX_WINDOW;
522 scp->snd_window = NSP_MIN_WINDOW;
523 scp->nsp_srtt = NSP_INITIAL_SRTT;
524 scp->nsp_rttvar = NSP_INITIAL_RTTVAR;
525 scp->nsp_rxtshift = 0;
527 skb_queue_head_init(&scp->data_xmit_queue);
528 skb_queue_head_init(&scp->other_xmit_queue);
529 skb_queue_head_init(&scp->other_receive_queue);
532 scp->persist_fxn = NULL;
533 scp->keepalive = 10 * HZ;
534 scp->keepalive_fxn = dn_keepalive;
536 dn_start_slow_timer(sk);
543 * FIXME: Should respond to SO_KEEPALIVE etc.
545 static void dn_keepalive(struct sock *sk)
547 struct dn_scp *scp = DN_SK(sk);
550 * By checking that the other_data transmit queue is empty
551 * we are double checking that we are not sending too
552 * many of these keepalive frames.
554 if (skb_queue_empty(&scp->other_xmit_queue))
555 dn_nsp_send_link(sk, DN_NOCHANGE, 0);
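/*
 * The keepalive itself is a Link Service message on the other-data
 * subchannel requesting no flow control change (DN_NOCHANGE), so it
 * exercises the connection without altering the peer's transmit window.
 */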
560 * Timer for shutdown/destroyed sockets.
561 * When a socket is dead & no packets have been sent for a
562 * certain amount of time, it is removed by this
563 * routine. Also takes care of sending out DI & DC
564 * frames at correct times.
566 int dn_destroy_timer(struct sock *sk)
568 struct dn_scp *scp = DN_SK(sk);
570 scp->persist = dn_nsp_persist(sk);
572 switch (scp->state) {
574 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
575 if (scp->nsp_rxtshift >= decnet_di_count)
580 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
581 if (scp->nsp_rxtshift >= decnet_dr_count)
586 if (scp->nsp_rxtshift < decnet_dn_count) {
587 /* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */
588 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
594 scp->persist = (HZ * decnet_time_wait);
599 if (time_after_eq(jiffies, scp->stamp + HZ * decnet_time_wait)) {
608 static void dn_destroy_sock(struct sock *sk)
610 struct dn_scp *scp = DN_SK(sk);
612 scp->nsp_rxtshift = 0; /* reset back off */
615 if (sk->sk_socket->state != SS_UNCONNECTED)
616 sk->sk_socket->state = SS_DISCONNECTING;
619 sk->sk_state = TCP_CLOSE;
621 switch (scp->state) {
623 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
625 scp->persist_fxn = dn_destroy_timer;
626 scp->persist = dn_nsp_persist(sk);
637 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
647 scp->persist_fxn = dn_destroy_timer;
648 scp->persist = dn_nsp_persist(sk);
651 printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n");
654 dn_stop_slow_timer(sk);
656 dn_unhash_sock_bh(sk);
663 char *dn_addr2asc(__u16 addr, char *buf)
665 unsigned short node, area;
667 node = addr & 0x03ff;
669 sprintf(buf, "%hd.%hd", area, node);
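/*
 * A DECnet node address is 16 bits: the top six bits are the area and
 * the low ten bits the node within that area, so 0x0464 (area 1,
 * node 100), for example, is printed as "1.100".
 */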
676 static int dn_create(struct net *net, struct socket *sock, int protocol,
681 if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
684 if (!net_eq(net, &init_net))
685 return -EAFNOSUPPORT;
687 switch (sock->type) {
689 if (protocol != DNPROTO_NSP)
690 return -EPROTONOSUPPORT;
695 return -ESOCKTNOSUPPORT;
699 if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL, kern)) == NULL)
702 sk->sk_protocol = protocol;
709 dn_release(struct socket *sock)
711 struct sock *sk = sock->sk;
725 static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
727 struct sock *sk = sock->sk;
728 struct dn_scp *scp = DN_SK(sk);
729 struct sockaddr_dn *saddr = (struct sockaddr_dn *)uaddr;
730 struct net_device *dev, *ldev;
733 if (addr_len != sizeof(struct sockaddr_dn))
736 if (saddr->sdn_family != AF_DECnet)
739 if (le16_to_cpu(saddr->sdn_nodeaddrl) && (le16_to_cpu(saddr->sdn_nodeaddrl) != 2))
742 if (le16_to_cpu(saddr->sdn_objnamel) > DN_MAXOBJL)
745 if (saddr->sdn_flags & ~SDF_WILD)
748 if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum ||
749 (saddr->sdn_flags & SDF_WILD)))
752 if (!(saddr->sdn_flags & SDF_WILD)) {
753 if (le16_to_cpu(saddr->sdn_nodeaddrl)) {
756 for_each_netdev_rcu(&init_net, dev) {
759 if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) {
766 return -EADDRNOTAVAIL;
772 if (sock_flag(sk, SOCK_ZAPPED)) {
773 memcpy(&scp->addr, saddr, addr_len);
774 sock_reset_flag(sk, SOCK_ZAPPED);
776 rv = dn_hash_sock(sk);
778 sock_set_flag(sk, SOCK_ZAPPED);
786 static int dn_auto_bind(struct socket *sock)
788 struct sock *sk = sock->sk;
789 struct dn_scp *scp = DN_SK(sk);
792 sock_reset_flag(sk, SOCK_ZAPPED);
794 scp->addr.sdn_flags = 0;
795 scp->addr.sdn_objnum = 0;
798 * This stuff is to keep compatibility with Eduardo's
799 * patch. I hope I can dispense with it shortly...
801 if ((scp->accessdata.acc_accl != 0) &&
802 (scp->accessdata.acc_accl <= 12)) {
804 scp->addr.sdn_objnamel = cpu_to_le16(scp->accessdata.acc_accl);
805 memcpy(scp->addr.sdn_objname, scp->accessdata.acc_acc, le16_to_cpu(scp->addr.sdn_objnamel));
807 scp->accessdata.acc_accl = 0;
808 memset(scp->accessdata.acc_acc, 0, 40);
810 /* End of compatibility stuff */
812 scp->addr.sdn_add.a_len = cpu_to_le16(2);
813 rv = dn_dev_bind_default((__le16 *)scp->addr.sdn_add.a_addr);
815 rv = dn_hash_sock(sk);
817 sock_set_flag(sk, SOCK_ZAPPED);
823 static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
825 struct dn_scp *scp = DN_SK(sk);
829 if (scp->state != DN_CR)
833 scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk));
834 dn_send_conn_conf(sk, allocation);
836 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
839 if (scp->state == DN_CC)
840 *timeo = schedule_timeout(*timeo);
843 if (scp->state == DN_RUN)
845 err = sock_error(sk);
848 err = sock_intr_errno(*timeo);
849 if (signal_pending(current))
854 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
856 finish_wait(sk_sleep(sk), &wait);
858 sk->sk_socket->state = SS_CONNECTED;
859 } else if (scp->state != DN_CC) {
860 sk->sk_socket->state = SS_UNCONNECTED;
865 static int dn_wait_run(struct sock *sk, long *timeo)
867 struct dn_scp *scp = DN_SK(sk);
871 if (scp->state == DN_RUN)
877 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
880 if (scp->state == DN_CI || scp->state == DN_CC)
881 *timeo = schedule_timeout(*timeo);
884 if (scp->state == DN_RUN)
886 err = sock_error(sk);
889 err = sock_intr_errno(*timeo);
890 if (signal_pending(current))
895 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
897 finish_wait(sk_sleep(sk), &wait);
900 sk->sk_socket->state = SS_CONNECTED;
901 } else if (scp->state != DN_CI && scp->state != DN_CC) {
902 sk->sk_socket->state = SS_UNCONNECTED;
907 static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
909 struct socket *sock = sk->sk_socket;
910 struct dn_scp *scp = DN_SK(sk);
913 struct dst_entry *dst;
915 if (sock->state == SS_CONNECTED)
918 if (sock->state == SS_CONNECTING) {
920 if (scp->state == DN_RUN) {
921 sock->state = SS_CONNECTED;
925 if (scp->state != DN_CI && scp->state != DN_CC) {
926 sock->state = SS_UNCONNECTED;
929 return dn_wait_run(sk, timeo);
933 if (scp->state != DN_O)
936 if (addr == NULL || addrlen != sizeof(struct sockaddr_dn))
938 if (addr->sdn_family != AF_DECnet)
940 if (addr->sdn_flags & SDF_WILD)
943 if (sock_flag(sk, SOCK_ZAPPED)) {
944 err = dn_auto_bind(sk->sk_socket);
949 memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn));
952 memset(&fld, 0, sizeof(fld));
953 fld.flowidn_oif = sk->sk_bound_dev_if;
954 fld.daddr = dn_saddr2dn(&scp->peer);
955 fld.saddr = dn_saddr2dn(&scp->addr);
956 dn_sk_ports_copy(&fld, scp);
957 fld.flowidn_proto = DNPROTO_NSP;
958 if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, flags) < 0)
960 dst = __sk_dst_get(sk);
961 sk->sk_route_caps = dst->dev->features;
962 sock->state = SS_CONNECTING;
964 scp->segsize_loc = dst_metric_advmss(dst);
966 dn_nsp_send_conninit(sk, NSP_CI);
969 err = dn_wait_run(sk, timeo);
975 static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addrlen, int flags)
977 struct sockaddr_dn *addr = (struct sockaddr_dn *)uaddr;
978 struct sock *sk = sock->sk;
980 long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
983 err = __dn_connect(sk, addr, addrlen, &timeo, 0);
989 static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
991 struct dn_scp *scp = DN_SK(sk);
993 switch (scp->state) {
997 return dn_confirm_accept(sk, timeo, sk->sk_allocation);
1000 return dn_wait_run(sk, timeo);
1002 return __dn_connect(sk, addr, addrlen, timeo, flags);
1009 static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc)
1011 unsigned char *ptr = skb->data;
1013 acc->acc_userl = *ptr++;
1014 memcpy(&acc->acc_user, ptr, acc->acc_userl);
1015 ptr += acc->acc_userl;
1017 acc->acc_passl = *ptr++;
1018 memcpy(&acc->acc_pass, ptr, acc->acc_passl);
1019 ptr += acc->acc_passl;
1021 acc->acc_accl = *ptr++;
1022 memcpy(&acc->acc_acc, ptr, acc->acc_accl);
1024 skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3);
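/*
 * Access data on a connection request is three counted strings (user,
 * password and account), copied verbatim; dn_user_copy() below does the
 * same for the single counted string of optional connect data.
 */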
1028 static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
1030 unsigned char *ptr = skb->data;
1031 u16 len = *ptr++; /* yes, it's 8bit on the wire */
1033 BUG_ON(len > 16); /* we've checked the contents earlier */
1034 opt->opt_optl = cpu_to_le16(len);
1035 opt->opt_status = 0;
1036 memcpy(opt->opt_data, ptr, len);
1037 skb_pull(skb, len + 1);
1040 static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
1043 struct sk_buff *skb = NULL;
1046 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1049 skb = skb_dequeue(&sk->sk_receive_queue);
1051 *timeo = schedule_timeout(*timeo);
1052 skb = skb_dequeue(&sk->sk_receive_queue);
1058 if (sk->sk_state != TCP_LISTEN)
1060 err = sock_intr_errno(*timeo);
1061 if (signal_pending(current))
1066 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1068 finish_wait(sk_sleep(sk), &wait);
1070 return skb == NULL ? ERR_PTR(err) : skb;
1073 static int dn_accept(struct socket *sock, struct socket *newsock, int flags,
1076 struct sock *sk = sock->sk, *newsk;
1077 struct sk_buff *skb = NULL;
1078 struct dn_skb_cb *cb;
1079 unsigned char menuver;
1082 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1083 struct dst_entry *dst;
1087 if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) {
1092 skb = skb_dequeue(&sk->sk_receive_queue);
1094 skb = dn_wait_for_connect(sk, &timeo);
1097 return PTR_ERR(skb);
1101 cb = DN_SKB_CB(skb);
1102 sk->sk_ack_backlog--;
1103 newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern);
1104 if (newsk == NULL) {
1112 sk_dst_set(newsk, dst);
1113 skb_dst_set(skb, NULL);
1115 DN_SK(newsk)->state = DN_CR;
1116 DN_SK(newsk)->addrrem = cb->src_port;
1117 DN_SK(newsk)->services_rem = cb->services;
1118 DN_SK(newsk)->info_rem = cb->info;
1119 DN_SK(newsk)->segsize_rem = cb->segsize;
1120 DN_SK(newsk)->accept_mode = DN_SK(sk)->accept_mode;
1122 if (DN_SK(newsk)->segsize_rem < 230)
1123 DN_SK(newsk)->segsize_rem = 230;
1125 if ((DN_SK(newsk)->services_rem & NSP_FC_MASK) == NSP_FC_NONE)
1126 DN_SK(newsk)->max_window = decnet_no_fc_max_cwnd;
1128 newsk->sk_state = TCP_LISTEN;
1129 memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn));
1132 * If we are listening on a wild socket, we don't want
1133 * the newly created socket on the wrong hash queue.
1135 DN_SK(newsk)->addr.sdn_flags &= ~SDF_WILD;
1137 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->addr), &type));
1138 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type));
1139 *(__le16 *)(DN_SK(newsk)->peer.sdn_add.a_addr) = cb->src;
1140 *(__le16 *)(DN_SK(newsk)->addr.sdn_add.a_addr) = cb->dst;
1142 menuver = *skb->data;
1145 if (menuver & DN_MENUVER_ACC)
1146 dn_access_copy(skb, &(DN_SK(newsk)->accessdata));
1148 if (menuver & DN_MENUVER_USR)
1149 dn_user_copy(skb, &(DN_SK(newsk)->conndata_in));
1151 if (menuver & DN_MENUVER_PRX)
1152 DN_SK(newsk)->peer.sdn_flags |= SDF_PROXY;
1154 if (menuver & DN_MENUVER_UIC)
1155 DN_SK(newsk)->peer.sdn_flags |= SDF_UICPROXY;
1159 memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out),
1160 sizeof(struct optdata_dn));
1161 memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out),
1162 sizeof(struct optdata_dn));
1165 err = dn_hash_sock(newsk);
1167 sock_reset_flag(newsk, SOCK_ZAPPED);
1168 dn_send_conn_ack(newsk);
1171 * Here we use sk->sk_allocation since although the conn conf is
1172 * for the newsk, the context is the old socket.
1174 if (DN_SK(newsk)->accept_mode == ACC_IMMED)
1175 err = dn_confirm_accept(newsk, &timeo,
1178 release_sock(newsk);
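/*
 * With ACC_IMMED, dn_confirm_accept() above has already sent the connect
 * confirm and the new socket is on its way to DN_RUN. With ACC_DEFER it
 * stays in DN_CR until the application accepts or rejects the connection
 * via the DSO_CONACCEPT / DSO_CONREJECT options (see __dn_setsockopt()).
 */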
1183 static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int peer)
1185 struct sockaddr_dn *sa = (struct sockaddr_dn *)uaddr;
1186 struct sock *sk = sock->sk;
1187 struct dn_scp *scp = DN_SK(sk);
1192 if ((sock->state != SS_CONNECTED &&
1193 sock->state != SS_CONNECTING) &&
1194 scp->accept_mode == ACC_IMMED) {
1199 memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn));
1201 memcpy(sa, &scp->addr, sizeof(struct sockaddr_dn));
1206 return sizeof(struct sockaddr_dn);
1210 static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table *wait)
1212 struct sock *sk = sock->sk;
1213 struct dn_scp *scp = DN_SK(sk);
1214 __poll_t mask = datagram_poll(file, sock, wait);
1216 if (!skb_queue_empty(&scp->other_receive_queue))
1217 mask |= EPOLLRDBAND;
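/*
 * "Other data" (the out-of-band subchannel) is reported to poll() as
 * EPOLLRDBAND here and is read with MSG_OOB in dn_recvmsg() below.
 */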
1222 static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1224 struct sock *sk = sock->sk;
1225 struct dn_scp *scp = DN_SK(sk);
1226 int err = -EOPNOTSUPP;
1228 struct sk_buff *skb;
1235 return dn_dev_ioctl(cmd, (void __user *)arg);
1239 val = !skb_queue_empty(&scp->other_receive_queue);
1240 if (scp->state != DN_RUN)
1246 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1249 err = put_user(amount, (int __user *)arg);
1254 skb = skb_peek(&scp->other_receive_queue);
1258 skb_queue_walk(&sk->sk_receive_queue, skb)
1262 err = put_user(amount, (int __user *)arg);
1273 static int dn_listen(struct socket *sock, int backlog)
1275 struct sock *sk = sock->sk;
1280 if (sock_flag(sk, SOCK_ZAPPED))
1283 if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN))
1286 sk->sk_max_ack_backlog = backlog;
1287 sk->sk_ack_backlog = 0;
1288 sk->sk_state = TCP_LISTEN;
1299 static int dn_shutdown(struct socket *sock, int how)
1301 struct sock *sk = sock->sk;
1302 struct dn_scp *scp = DN_SK(sk);
1303 int err = -ENOTCONN;
1307 if (sock->state == SS_UNCONNECTED)
1311 if (sock->state == SS_DISCONNECTING)
1315 if (scp->state == DN_O)
1318 if (how != SHUT_RDWR)
1321 sk->sk_shutdown = SHUTDOWN_MASK;
1322 dn_destroy_sock(sk);
1331 static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1333 struct sock *sk = sock->sk;
1337 err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
1339 #ifdef CONFIG_NETFILTER
1340 /* we need to exclude all possible ENOPROTOOPTs except default case */
1341 if (err == -ENOPROTOOPT && optname != DSO_LINKINFO &&
1342 optname != DSO_STREAM && optname != DSO_SEQPACKET)
1343 err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
1349 static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, unsigned int optlen, int flags)
1351 struct sock *sk = sock->sk;
1352 struct dn_scp *scp = DN_SK(sk);
1355 struct optdata_dn opt;
1356 struct accessdata_dn acc;
1360 unsigned char services;
1365 if (optlen && !optval)
1368 if (optlen > sizeof(u))
1371 if (copy_from_user(&u, optval, optlen))
1376 if (sock->state == SS_CONNECTED)
1378 if ((scp->state != DN_O) && (scp->state != DN_CR))
1381 if (optlen != sizeof(struct optdata_dn))
1384 if (le16_to_cpu(u.opt.opt_optl) > 16)
1387 memcpy(&scp->conndata_out, &u.opt, optlen);
1391 if (sock->state != SS_CONNECTED &&
1392 scp->accept_mode == ACC_IMMED)
1395 if (optlen != sizeof(struct optdata_dn))
1398 if (le16_to_cpu(u.opt.opt_optl) > 16)
1401 memcpy(&scp->discdata_out, &u.opt, optlen);
1405 if (sock->state == SS_CONNECTED)
1407 if (scp->state != DN_O)
1410 if (optlen != sizeof(struct accessdata_dn))
1413 if ((u.acc.acc_accl > DN_MAXACCL) ||
1414 (u.acc.acc_passl > DN_MAXACCL) ||
1415 (u.acc.acc_userl > DN_MAXACCL))
1418 memcpy(&scp->accessdata, &u.acc, optlen);
1421 case DSO_ACCEPTMODE:
1422 if (sock->state == SS_CONNECTED)
1424 if (scp->state != DN_O)
1427 if (optlen != sizeof(int))
1430 if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER))
1433 scp->accept_mode = (unsigned char)u.mode;
1437 if (scp->state != DN_CR)
1439 timeo = sock_rcvtimeo(sk, 0);
1440 err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
1444 if (scp->state != DN_CR)
1448 sk->sk_shutdown = SHUTDOWN_MASK;
1449 dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
1453 if (optlen != sizeof(unsigned long))
1455 if (u.win > NSP_MAX_WINDOW)
1456 u.win = NSP_MAX_WINDOW;
1459 scp->max_window = u.win;
1460 if (scp->snd_window > u.win)
1461 scp->snd_window = u.win;
1465 if (optlen != sizeof(int))
1467 if (scp->nonagle == TCP_NAGLE_CORK)
1469 scp->nonagle = (u.val == 0) ? 0 : TCP_NAGLE_OFF;
1470 /* if (scp->nonagle == 1) { Push pending frames } */
1474 if (optlen != sizeof(int))
1476 if (scp->nonagle == TCP_NAGLE_OFF)
1478 scp->nonagle = (u.val == 0) ? 0 : TCP_NAGLE_CORK;
1479 /* if (scp->nonagle == 0) { Push pending frames } */
1483 if (optlen != sizeof(unsigned char))
1485 if ((u.services & ~NSP_FC_MASK) != 0x01)
1487 if ((u.services & NSP_FC_MASK) == NSP_FC_MASK)
1489 scp->services_loc = u.services;
1493 if (optlen != sizeof(unsigned char))
1497 scp->info_loc = u.info;
1504 return -ENOPROTOOPT;
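/*
 * Illustrative user-level sketch of the deferred accept handshake (not
 * part of this module; SOL_DECNET as the option level is the usual
 * convention but is an assumption here):
 *
 *	int mode = ACC_DEFER;
 *	setsockopt(s, SOL_DECNET, DSO_ACCEPTMODE, &mode, sizeof(mode));
 *	...
 *	int ns = accept(s, NULL, NULL);		// new socket left in DN_CR
 *	setsockopt(ns, SOL_DECNET, DSO_CONACCEPT, NULL, 0);
 *	// or DSO_CONREJECT here to refuse the connection
 */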
1510 static int dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1512 struct sock *sk = sock->sk;
1516 err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
1518 #ifdef CONFIG_NETFILTER
1519 if (err == -ENOPROTOOPT && optname != DSO_STREAM &&
1520 optname != DSO_SEQPACKET && optname != DSO_CONACCEPT &&
1521 optname != DSO_CONREJECT) {
1524 if (get_user(len, optlen))
1527 err = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
1529 err = put_user(len, optlen);
1536 static int __dn_getsockopt(struct socket *sock, int level,int optname, char __user *optval,int __user *optlen, int flags)
1538 struct sock *sk = sock->sk;
1539 struct dn_scp *scp = DN_SK(sk);
1540 struct linkinfo_dn link;
1542 void *r_data = NULL;
1545 if (get_user(r_len, optlen))
1550 if (r_len > sizeof(struct optdata_dn))
1551 r_len = sizeof(struct optdata_dn);
1552 r_data = &scp->conndata_in;
1556 if (r_len > sizeof(struct optdata_dn))
1557 r_len = sizeof(struct optdata_dn);
1558 r_data = &scp->discdata_in;
1562 if (r_len > sizeof(struct accessdata_dn))
1563 r_len = sizeof(struct accessdata_dn);
1564 r_data = &scp->accessdata;
1567 case DSO_ACCEPTMODE:
1568 if (r_len > sizeof(unsigned char))
1569 r_len = sizeof(unsigned char);
1570 r_data = &scp->accept_mode;
1574 if (r_len > sizeof(struct linkinfo_dn))
1575 r_len = sizeof(struct linkinfo_dn);
1577 memset(&link, 0, sizeof(link));
1579 switch (sock->state) {
1581 link.idn_linkstate = LL_CONNECTING;
1583 case SS_DISCONNECTING:
1584 link.idn_linkstate = LL_DISCONNECTING;
1587 link.idn_linkstate = LL_RUNNING;
1590 link.idn_linkstate = LL_INACTIVE;
1593 link.idn_segsize = scp->segsize_rem;
1598 if (r_len > sizeof(unsigned long))
1599 r_len = sizeof(unsigned long);
1600 r_data = &scp->max_window;
1604 if (r_len > sizeof(int))
1605 r_len = sizeof(int);
1606 val = (scp->nonagle == TCP_NAGLE_OFF);
1611 if (r_len > sizeof(int))
1612 r_len = sizeof(int);
1613 val = (scp->nonagle == TCP_NAGLE_CORK);
1618 if (r_len > sizeof(unsigned char))
1619 r_len = sizeof(unsigned char);
1620 r_data = &scp->services_rem;
1624 if (r_len > sizeof(unsigned char))
1625 r_len = sizeof(unsigned char);
1626 r_data = &scp->info_rem;
1634 return -ENOPROTOOPT;
1638 if (copy_to_user(optval, r_data, r_len))
1640 if (put_user(r_len, optlen))
1648 static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
1650 struct sk_buff *skb;
1653 if (flags & MSG_OOB)
1654 return !skb_queue_empty(q) ? 1 : 0;
1656 skb_queue_walk(q, skb) {
1657 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1660 if (cb->nsp_flags & 0x40) {
1661 /* SOCK_SEQPACKET reads to EOM */
1662 if (sk->sk_type == SOCK_SEQPACKET)
1664 /* so does SOCK_STREAM unless WAITALL is specified */
1665 if (!(flags & MSG_WAITALL))
1669 /* minimum data length for read exceeded */
1678 static int dn_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1681 struct sock *sk = sock->sk;
1682 struct dn_scp *scp = DN_SK(sk);
1683 struct sk_buff_head *queue = &sk->sk_receive_queue;
1684 size_t target = size > 1 ? 1 : 0;
1687 struct sk_buff *skb, *n;
1688 struct dn_skb_cb *cb = NULL;
1689 unsigned char eor = 0;
1690 long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1694 if (sock_flag(sk, SOCK_ZAPPED)) {
1695 rv = -EADDRNOTAVAIL;
1699 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1704 rv = dn_check_state(sk, NULL, 0, &timeo, flags);
1708 if (flags & ~(MSG_CMSG_COMPAT|MSG_PEEK|MSG_OOB|MSG_WAITALL|MSG_DONTWAIT|MSG_NOSIGNAL)) {
1713 if (flags & MSG_OOB)
1714 queue = &scp->other_receive_queue;
1716 if (flags & MSG_WAITALL)
721 * See if there is data ready to read; sleep if there isn't
1724 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1729 if (!skb_queue_empty(&scp->other_receive_queue)) {
1730 if (!(flags & MSG_OOB)) {
1731 msg->msg_flags |= MSG_OOB;
1732 if (!scp->other_report) {
1733 scp->other_report = 1;
1739 if (scp->state != DN_RUN)
1742 if (signal_pending(current)) {
1743 rv = sock_intr_errno(timeo);
1747 if (dn_data_ready(sk, queue, flags, target))
1750 if (flags & MSG_DONTWAIT) {
1755 add_wait_queue(sk_sleep(sk), &wait);
1756 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1757 sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target), &wait);
1758 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1759 remove_wait_queue(sk_sleep(sk), &wait);
1762 skb_queue_walk_safe(queue, skb, n) {
1763 unsigned int chunk = skb->len;
1764 cb = DN_SKB_CB(skb);
1766 if ((chunk + copied) > size)
1767 chunk = size - copied;
1769 if (memcpy_to_msg(msg, skb->data, chunk)) {
1775 if (!(flags & MSG_PEEK))
1776 skb_pull(skb, chunk);
1778 eor = cb->nsp_flags & 0x40;
1780 if (skb->len == 0) {
1781 skb_unlink(skb, queue);
1784 * N.B. Don't refer to skb or cb after this point
1787 if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) {
1788 scp->flowloc_sw = DN_SEND;
1789 dn_nsp_send_link(sk, DN_SEND, 0);
1794 if (sk->sk_type == SOCK_SEQPACKET)
1796 if (!(flags & MSG_WAITALL))
1800 if (flags & MSG_OOB)
1803 if (copied >= target)
1810 if (eor && (sk->sk_type == SOCK_SEQPACKET))
1811 msg->msg_flags |= MSG_EOR;
1815 rv = (flags & MSG_PEEK) ? -sk->sk_err : sock_error(sk);
1817 if ((rv >= 0) && msg->msg_name) {
1818 __sockaddr_check_size(sizeof(struct sockaddr_dn));
1819 memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn));
1820 msg->msg_namelen = sizeof(struct sockaddr_dn);
1829 static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags)
1831 unsigned char fctype = scp->services_rem & NSP_FC_MASK;
1832 if (skb_queue_len(queue) >= scp->snd_window)
1834 if (fctype != NSP_FC_NONE) {
1835 if (flags & MSG_OOB) {
1836 if (scp->flowrem_oth == 0)
1839 if (scp->flowrem_dat == 0)
1847 * The DECnet spec requires that the "routing layer" accepts packets which
1848 * are at least 230 bytes in size. This excludes any headers which the NSP
1849 * layer might add, so we always assume that we'll be using the maximal
1850 * length header on data packets. The variation in length is due to the
1851 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
1852 * make much practical difference.
1854 unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu)
1856 unsigned int mss = 230 - DN_MAX_NSP_DATA_HEADER;
1858 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
1859 mtu -= LL_RESERVED_SPACE(dev);
1860 if (dn_db->use_long)
1864 mtu -= DN_MAX_NSP_DATA_HEADER;
1867 * 21 = long header, 16 = guess at MAC header length
1869 mtu -= (21 + DN_MAX_NSP_DATA_HEADER + 16);
1876 static inline unsigned int dn_current_mss(struct sock *sk, int flags)
1878 struct dst_entry *dst = __sk_dst_get(sk);
1879 struct dn_scp *scp = DN_SK(sk);
1880 int mss_now = min_t(int, scp->segsize_loc, scp->segsize_rem);
1882 /* Other data messages are limited to 16 bytes per packet */
1883 if (flags & MSG_OOB)
1886 /* This works out the maximum size of segment we can send out */
1888 u32 mtu = dst_mtu(dst);
1889 mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now);
1896 * N.B. We get the timeout wrong here, but then we always did get it
1897 * wrong before and this is another step along the road to correcting
1898 * it. It ought to get updated each time we pass through the routine,
1899 * but in practice it probably doesn't matter too much for now.
1901 static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
1902 unsigned long datalen, int noblock,
1905 struct sk_buff *skb = sock_alloc_send_skb(sk, datalen,
1908 skb->protocol = htons(ETH_P_DNA_RT);
1909 skb->pkt_type = PACKET_OUTGOING;
1914 static int dn_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
1916 struct sock *sk = sock->sk;
1917 struct dn_scp *scp = DN_SK(sk);
1919 struct sk_buff_head *queue = &scp->data_xmit_queue;
1920 int flags = msg->msg_flags;
1923 int addr_len = msg->msg_namelen;
1924 DECLARE_SOCKADDR(struct sockaddr_dn *, addr, msg->msg_name);
1925 struct sk_buff *skb = NULL;
1926 struct dn_skb_cb *cb;
1928 unsigned char fctype;
1931 if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT))
1934 if (addr_len && (addr_len != sizeof(struct sockaddr_dn)))
1938 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1940 * The only difference between stream sockets and sequenced packet
1941 * sockets is that the stream sockets always behave as if MSG_EOR has been set.
1944 if (sock->type == SOCK_STREAM) {
1945 if (flags & MSG_EOR) {
1953 err = dn_check_state(sk, addr, addr_len, &timeo, flags);
1957 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1959 if (!(flags & MSG_NOSIGNAL))
1960 send_sig(SIGPIPE, current, 0);
1964 if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
1965 dst_negative_advice(sk);
1967 mss = scp->segsize_rem;
1968 fctype = scp->services_rem & NSP_FC_MASK;
1970 mss = dn_current_mss(sk, flags);
1972 if (flags & MSG_OOB) {
1973 queue = &scp->other_xmit_queue;
1980 scp->persist_fxn = dn_nsp_xmit_timeout;
1982 while (sent < size) {
1983 err = sock_error(sk);
1987 if (signal_pending(current)) {
1988 err = sock_intr_errno(timeo);
1993 * Calculate size that we wish to send.
2001 * Wait for queue size to go down below the window
2004 if (dn_queue_too_long(scp, queue, flags)) {
2005 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2007 if (flags & MSG_DONTWAIT) {
2012 add_wait_queue(sk_sleep(sk), &wait);
2013 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2014 sk_wait_event(sk, &timeo,
2015 !dn_queue_too_long(scp, queue, flags), &wait);
2016 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2017 remove_wait_queue(sk_sleep(sk), &wait);
2022 * Get a suitably sized skb.
2023 * 64 is a bit of a hack really, but it's larger than any
2024 * link-layer headers and has served us well as a good
2025 * guess as to their real length.
2027 skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER,
2028 flags & MSG_DONTWAIT, &err);
2036 cb = DN_SKB_CB(skb);
2038 skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER);
2040 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
2045 if (flags & MSG_OOB) {
2046 cb->nsp_flags = 0x30;
2047 if (fctype != NSP_FC_NONE)
2050 cb->nsp_flags = 0x00;
2051 if (scp->seg_total == 0)
2052 cb->nsp_flags |= 0x20;
2054 scp->seg_total += len;
2056 if (((sent + len) == size) && (flags & MSG_EOR)) {
2057 cb->nsp_flags |= 0x40;
2059 if (fctype == NSP_FC_SCMC)
2062 if (fctype == NSP_FC_SRC)
2067 dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB);
2070 scp->persist = dn_nsp_persist(sk);
2079 return sent ? sent : err;
2082 err = sk_stream_error(sk, flags, err);
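/*
 * From user level a SOCK_SEQPACKET sender marks record boundaries
 * explicitly, e.g. send(fd, buf, len, MSG_EOR), which sets the
 * end-of-message flag (0x40) on the final NSP data segment; SOCK_STREAM
 * sockets behave as if MSG_EOR were set on every sendmsg() call.
 */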
2087 static int dn_device_event(struct notifier_block *this, unsigned long event,
2090 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2092 if (!net_eq(dev_net(dev), &init_net))
2109 static struct notifier_block dn_dev_notifier = {
2110 .notifier_call = dn_device_event,
2113 static struct packet_type dn_dix_packet_type __read_mostly = {
2114 .type = cpu_to_be16(ETH_P_DNA_RT),
2115 .func = dn_route_rcv,
2118 #ifdef CONFIG_PROC_FS
2119 struct dn_iter_state {
2123 static struct sock *dn_socket_get_first(struct seq_file *seq)
2125 struct dn_iter_state *state = seq->private;
2126 struct sock *n = NULL;
2128 for (state->bucket = 0;
2129 state->bucket < DN_SK_HASH_SIZE;
2131 n = sk_head(&dn_sk_hash[state->bucket]);
2139 static struct sock *dn_socket_get_next(struct seq_file *seq,
2142 struct dn_iter_state *state = seq->private;
2148 if (++state->bucket >= DN_SK_HASH_SIZE)
2150 n = sk_head(&dn_sk_hash[state->bucket]);
2156 static struct sock *socket_get_idx(struct seq_file *seq, loff_t *pos)
2158 struct sock *sk = dn_socket_get_first(seq);
2161 while (*pos && (sk = dn_socket_get_next(seq, sk)))
2164 return *pos ? NULL : sk;
2167 static void *dn_socket_get_idx(struct seq_file *seq, loff_t pos)
2170 read_lock_bh(&dn_hash_lock);
2171 rc = socket_get_idx(seq, &pos);
2173 read_unlock_bh(&dn_hash_lock);
2178 static void *dn_socket_seq_start(struct seq_file *seq, loff_t *pos)
2180 return *pos ? dn_socket_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2183 static void *dn_socket_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2187 if (v == SEQ_START_TOKEN) {
2188 rc = dn_socket_get_idx(seq, 0);
2192 rc = dn_socket_get_next(seq, v);
2195 read_unlock_bh(&dn_hash_lock);
2201 static void dn_socket_seq_stop(struct seq_file *seq, void *v)
2203 if (v && v != SEQ_START_TOKEN)
2204 read_unlock_bh(&dn_hash_lock);
2207 #define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126)
2209 static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf)
2213 switch (le16_to_cpu(dn->sdn_objnamel)) {
2215 sprintf(buf, "%d", dn->sdn_objnum);
2218 for (i = 0; i < le16_to_cpu(dn->sdn_objnamel); i++) {
2219 buf[i] = dn->sdn_objname[i];
2220 if (IS_NOT_PRINTABLE(buf[i]))
2227 static char *dn_state2asc(unsigned char state)
2267 static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk)
2269 struct dn_scp *scp = DN_SK(sk);
2270 char buf1[DN_ASCBUF_LEN];
2271 char buf2[DN_ASCBUF_LEN];
2272 char local_object[DN_MAXOBJL+3];
2273 char remote_object[DN_MAXOBJL+3];
2275 dn_printable_object(&scp->addr, local_object);
2276 dn_printable_object(&scp->peer, remote_object);
2279 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s "
2280 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n",
2281 dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->addr)), buf1),
2289 dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->peer)), buf2),
2297 dn_state2asc(scp->state),
2298 ((scp->accept_mode == ACC_IMMED) ? "IMMED" : "DEFER"));
2301 static int dn_socket_seq_show(struct seq_file *seq, void *v)
2303 if (v == SEQ_START_TOKEN) {
2304 seq_puts(seq, "Local Remote\n");
2306 dn_socket_format_entry(seq, v);
2311 static const struct seq_operations dn_socket_seq_ops = {
2312 .start = dn_socket_seq_start,
2313 .next = dn_socket_seq_next,
2314 .stop = dn_socket_seq_stop,
2315 .show = dn_socket_seq_show,
2319 static const struct net_proto_family dn_family_ops = {
2320 .family = AF_DECnet,
2321 .create = dn_create,
2322 .owner = THIS_MODULE,
2325 static const struct proto_ops dn_proto_ops = {
2326 .family = AF_DECnet,
2327 .owner = THIS_MODULE,
2328 .release = dn_release,
2330 .connect = dn_connect,
2331 .socketpair = sock_no_socketpair,
2332 .accept = dn_accept,
2333 .getname = dn_getname,
2336 .listen = dn_listen,
2337 .shutdown = dn_shutdown,
2338 .setsockopt = dn_setsockopt,
2339 .getsockopt = dn_getsockopt,
2340 .sendmsg = dn_sendmsg,
2341 .recvmsg = dn_recvmsg,
2342 .mmap = sock_no_mmap,
2343 .sendpage = sock_no_sendpage,
2346 MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
2347 MODULE_AUTHOR("Linux DECnet Project Team");
2348 MODULE_LICENSE("GPL");
2349 MODULE_ALIAS_NETPROTO(PF_DECnet);
2351 static const char banner[] __initconst = KERN_INFO
2352 "NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n";
2354 static int __init decnet_init(void)
2360 rc = proto_register(&dn_proto, 1);
2369 sock_register(&dn_family_ops);
2370 dev_add_pack(&dn_dix_packet_type);
2371 register_netdevice_notifier(&dn_dev_notifier);
2373 proc_create_seq_private("decnet", 0444, init_net.proc_net,
2374 &dn_socket_seq_ops, sizeof(struct dn_iter_state),
2376 dn_register_sysctl();
2381 module_init(decnet_init);
2384 * Prevent DECnet module unloading until it's fixed properly.
2385 * Requires an audit of the code to check for memory leaks and
2386 * initialisation problems etc.
2389 static void __exit decnet_exit(void)
2391 sock_unregister(AF_DECnet);
2392 rtnl_unregister_all(PF_DECnet);
2393 dev_remove_pack(&dn_dix_packet_type);
2395 dn_unregister_sysctl();
2397 unregister_netdevice_notifier(&dn_dev_notifier);
2404 remove_proc_entry("decnet", init_net.proc_net);
2406 proto_unregister(&dn_proto);
2408 rcu_barrier(); /* Wait for completion of call_rcu()'s */
2410 module_exit(decnet_exit);