/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/smp_lock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
				sizeof(struct iphdr) + sizeof(struct ethhdr))

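/*
 * Worked example of the sizing above: with the 1460-byte UDP payload
 * cap, MAX_SKB_SIZE is 1460 + 8 (UDP) + 20 (IP) + 14 (Ethernet)
 * = 1502 bytes, i.e. each pooled skb can hold one full frame.
 */
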
static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		netif_tx_lock_bh(dev);
		if (netif_queue_stopped(dev) ||
		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			netif_tx_unlock_bh(dev);
			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		netif_tx_unlock_bh(dev);
	}
}

static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY)
		return 0;
	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;
	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bidirectional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */

static void poll_napi(struct netpoll *np)
{
	struct netpoll_info *npinfo = np->dev->npinfo;
	int budget = 16;

	if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
	    npinfo->poll_owner != smp_processor_id() &&
	    spin_trylock(&npinfo->poll_lock)) {
		npinfo->rx_flags |= NETPOLL_RX_DROP;
		atomic_inc(&trapped);

		np->dev->poll(np->dev, &budget);

		atomic_dec(&trapped);
		npinfo->rx_flags &= ~NETPOLL_RX_DROP;
		spin_unlock(&npinfo->poll_lock);
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	struct sk_buff *skb;

	if (unlikely(!npi))
		return;

	skb = skb_dequeue(&npi->arp_tx);
	while (skb != NULL) {
		arp_reply(skb);
		skb = skb_dequeue(&npi->arp_tx);
	}
}

void netpoll_poll(struct netpoll *np)
{
	if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
		return;

	/* Process pending work on NIC */
	np->dev->poll_controller(np->dev);
	if (np->dev->poll)
		poll_napi(np);

	service_arp_queue(np->dev->npinfo);

	zap_completion_queue();
}

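/*
 * For context: a driver's ->poll_controller() conventionally simulates
 * the interrupt that cannot be delivered while interrupts are off. A
 * minimal sketch for a hypothetical driver (foo_poll_controller() and
 * foo_interrupt() are illustrative, not part of this file):
 *
 *	static void foo_poll_controller(struct net_device *dev)
 *	{
 *		disable_irq(dev->irq);
 *		foo_interrupt(dev->irq, dev);
 *		enable_irq(dev->irq);
 *	}
 */
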
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor)
				dev_kfree_skb_any(skb); /* put this one back */
			else
				__kfree_skb(skb);
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:
	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 &&
	    npinfo->poll_owner != smp_processor_id() &&
	    netif_tx_trylock(dev)) {
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) {
			if (!netif_queue_stopped(dev))
				status = dev->hard_start_xmit(skb, dev);

			if (status == NETDEV_TX_OK)
				break;

			/* tickle device: maybe there is some cleanup */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}
		netif_tx_unlock(dev);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}

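/*
 * Worked example of the retry budget above, assuming HZ=1000:
 * jiffies_to_usecs(1) is 1000, so the loop makes at most 1000/50 = 20
 * transmit attempts, waiting USEC_PER_POLL (50us) between attempts.
 * In other words, it busy-waits for up to one clock tick before
 * deferring the skb to the delayed-work queue.
 */
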
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	memcpy(skb->data, msg, len);
	skb->len += len;

	skb->h.uh = udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(htonl(np->local_ip),
					htonl(np->remote_ip),
					udp_len, IPPROTO_UDP,
					csum_partial((unsigned char *)udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph));

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(htonl(np->local_ip), &(iph->saddr));
	put_unaligned(htonl(np->remote_ip), &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb->mac.raw = skb->data;
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->local_mac, 6);
	memcpy(eth->h_dest, np->remote_mac, 6);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}

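/*
 * A minimal sketch of a netpoll_send_udp() caller, in the style of a
 * console driver's write path (example_write is illustrative, not part
 * of this file). Messages longer than MAX_UDP_CHUNK must be chunked,
 * since each call emits exactly one UDP frame:
 *
 *	static void example_write(struct netpoll *np,
 *				  const char *msg, unsigned int len)
 *	{
 *		unsigned int frag;
 *
 *		while (len > 0) {
 *			frag = min(len, (unsigned int)MAX_UDP_CHUNK);
 *			netpoll_send_udp(np, msg, frag);
 *			msg += frag;
 *			len -= frag;
 *		}
 *	}
 */
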
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np = NULL;

	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
		np = npinfo->rx_np;
	if (!np)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
				 (2 * skb->dev->addr_len) +
				 (2 * sizeof(u32)))))
		return;

	skb->h.raw = skb->nh.raw = skb->data;
	arp = skb->nh.arph;

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* if we actually cared about dst hw addr, it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (tip != htonl(np->local_ip) || LOOPBACK(tip) || MULTICAST(tip))
		return;

	size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
	send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
			    LL_RESERVED_SPACE(np->dev));
	if (!send_skb)
		return;

	send_skb->nh.raw = send_skb->data;
	arp = (struct arphdr *) skb_put(send_skb, size);
	send_skb->dev = skb->dev;
	send_skb->protocol = htons(ETH_P_ARP);

	/* Fill the device header for the ARP frame */
	if (np->dev->hard_header &&
	    np->dev->hard_header(send_skb, skb->dev, ptype,
				 sha, np->local_mac,
				 send_skb->len) < 0) {
		kfree_skb(send_skb);
		return;
	}

	/*
	 * Fill out the arp protocol part.
	 *
	 * we only support ethernet device type,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
	 */

	arp->ar_hrd = htons(np->dev->type);
	arp->ar_pro = htons(ETH_P_IP);
	arp->ar_hln = np->dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr = (unsigned char *)(arp + 1);
	memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &tip, 4);
	arp_ptr += 4;
	memcpy(arp_ptr, sha, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &sip, 4);

	netpoll_send_skb(np, send_skb);
}

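/*
 * For reference: on Ethernet (addr_len == 6) the reply built above
 * carries sizeof(struct arphdr) + 2 * (6 + 4) = 8 + 20 = 28 bytes of
 * ARP payload, the standard IPv4-over-Ethernet ARP packet size.
 */
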
int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npi = skb->dev->npinfo;
	struct netpoll *np = npi->rx_np;

	if (!np)
		goto out;
	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == __constant_htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npi->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;
	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;
	if (np->local_ip && np->local_ip != ntohl(iph->daddr))
		goto out;
	if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
		goto out;
	if (np->local_port && np->local_port != ntohs(uh->dest))
		goto out;

	np->rx_hook(np, ntohs(uh->source),
		    (char *)(uh+1),
		    ulen - sizeof(struct udphdr));

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

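/*
 * Parse a netpoll configuration string of the form
 *
 *	local_port@local_ip/dev_name,remote_port@remote_ip/remote_mac
 *
 * e.g. (illustrative values):
 *
 *	6665@192.168.0.1/eth0,6666@192.168.0.2/00:0a:0b:0c:0d:0e
 *
 * Empty fields (e.g. a leading '@') leave the corresponding np->*
 * value untouched, so the caller's defaults apply.
 */
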
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int i;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;
	printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port);

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = ntohl(in_aton(cur));
		cur = delim;

		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name);

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;
	printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port);

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = ntohl(in_aton(cur));
	cur = delim + 1;

	printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
	       np->name, HIPQUAD(np->remote_ip));

	if (*cur != 0) {
		/* MAC address: five colon-separated bytes, then one more */
		for (i = 0; i < 5; i++) {
			if ((delim = strchr(cur, ':')) == NULL)
				goto parse_failed;
			*delim = 0;
			np->remote_mac[i] = simple_strtol(cur, NULL, 16);
			cur = delim + 1;
		}
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	printk(KERN_INFO "%s: remote ethernet address "
	       "%02x:%02x:%02x:%02x:%02x:%02x\n",
	       np->name,
	       np->remote_mac[0], np->remote_mac[1], np->remote_mac[2],
	       np->remote_mac[3], np->remote_mac[4], np->remote_mac[5]);

	return 0;

 parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at %s!\n",
	       np->name, cur);
	return -1;
}

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	struct netpoll_info *npinfo;
	unsigned long flags;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -1;
	}

	np->dev = ndev;
	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto release;
		}

		npinfo->rx_flags = 0;
		npinfo->rx_np = NULL;
		spin_lock_init(&npinfo->poll_lock);
		npinfo->poll_owner = -1;

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	if (!ndev->poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto release;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto release;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + 4*HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			cond_resched();
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (is_zero_ether_addr(np->local_mac) && ndev->dev_addr)
		memcpy(np->local_mac, ndev->dev_addr, 6);

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto release;
		}

		np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		npinfo->rx_np = np;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* fill up the skb queue */
	refill_skbs();

	/* last thing to do is link it to the net device structure */
	ndev->npinfo = npinfo;

	/* avoid racing with NAPI reading npinfo */
	synchronize_rcu();

	return 0;

 release:
	if (!ndev->npinfo)
		kfree(npinfo);
	np->dev = NULL;
	dev_put(ndev);
	return err;
}

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

void netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	if (np->dev) {
		npinfo = np->dev->npinfo;
		if (npinfo) {
			if (npinfo->rx_np == np) {
				spin_lock_irqsave(&npinfo->rx_lock, flags);
				npinfo->rx_np = NULL;
				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
			}

			np->dev->npinfo = NULL;
			if (atomic_dec_and_test(&npinfo->refcnt)) {
				skb_queue_purge(&npinfo->arp_tx);
				skb_queue_purge(&npinfo->txq);
				cancel_rearming_delayed_work(&npinfo->tx_work);
				flush_scheduled_work();

				kfree(npinfo);
			}
		}

		dev_put(np->dev);
	}

	np->dev = NULL;
}

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);
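
/*
 * A minimal sketch of the client lifecycle behind the exports above
 * (example_np, example_init, example_exit, and the config string are
 * illustrative, not part of this file): parse the options, bind to the
 * device, send, and tear down on module exit.
 *
 *	static struct netpoll example_np = {
 *		.name = "example",
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		char config[] =
 *			"6665@10.0.0.1/eth0,6666@10.0.0.2/00:0a:0b:0c:0d:0e";
 *
 *		if (netpoll_parse_options(&example_np, config))
 *			return -EINVAL;
 *		if (netpoll_setup(&example_np))
 *			return -ENODEV;
 *		netpoll_send_udp(&example_np, "hello\n", 6);
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		netpoll_cleanup(&example_np);
 *	}
 */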