/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)
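/* For reference, with the usual header sizes and no VLAN tag, each pool
 * entry above works out to:
 *
 *	sizeof(struct ethhdr)  = 14
 *	sizeof(struct iphdr)   = 20
 *	sizeof(struct udphdr)  =  8
 *	MAX_UDP_CHUNK          = 1460
 *	                       ------
 *	MAX_SKB_SIZE           = 1502 bytes
 *
 * i.e. one full Ethernet frame's worth of UDP payload plus headers.
 */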
static void zap_completion_queue(void);
static void netpoll_async_cleanup(struct work_struct *work);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
			      struct netdev_queue *txq)
{
	int status = NETDEV_TX_OK;
	netdev_features_t features;

	features = netif_skb_features(skb);

	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (unlikely(!skb)) {
			/* This is actually a packet drop, but we
			 * don't want the code that calls this
			 * function to try and operate on a NULL skb.
			 */
			goto out;
		}
	}

	status = netdev_start_xmit(skb, dev, txq, false);

out:
	return status;
}
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		txq = skb_get_tx_queue(dev, skb);

		local_irq_save(flags);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}
/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 */
static void poll_one_napi(struct napi_struct *napi)
{
	int work = 0;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return;

	/* If we set this bit but see that it has already been set,
	 * that indicates that napi has been disabled and we need
	 * to abort this operation
	 */
	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
		return;

	/* We explicitly pass the polling call a budget of 0 to
	 * indicate that we are clearing the Tx path only.
	 */
	work = napi->poll(napi, 0);
	WARN_ONCE(work, "%pF exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi, work, 0);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
}
static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			poll_one_napi(napi);
			spin_unlock(&napi->poll_lock);
		}
	}
}
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);

	/* Don't do any rx activity if the dev_lock mutex is held;
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state.
	 */
	if (down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller) {
		up(&ni->dev_lock);
		return;
	}

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	up(&ni->dev_lock);

	zap_completion_queue();
}
void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);
void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);
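/* A sketch of how the two helpers above are meant to be paired by the
 * device open/close paths (illustrative only; the real callers live in
 * net/core/dev.c):
 *
 *	netpoll_poll_disable(dev);	// block netpoll while reconfiguring
 *	...change device state...
 *	netpoll_poll_enable(dev);	// let netpoll poll the NIC again
 */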
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;

			clist = clist->next;
			if (!skb_irq_freeable(skb)) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
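/* Allocation strategy for find_skb() below: try a fresh atomic
 * allocation first, fall back to the pre-filled pool, and if both fail
 * poll the device to reclaim completed Tx buffers and retry a bounded
 * number of times before giving up.
 */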
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}
/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, dev->netdev_ops->ndo_start_xmit);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
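/* Back-of-the-envelope for the retry loop above: with HZ=1000 (one
 * jiffy = 1000 us) and USEC_PER_POLL = 50, the sender makes
 *
 *	jiffies_to_usecs(1) / USEC_PER_POLL = 1000 / 50 = 20
 *
 * trylock attempts, i.e. it busy-polls for at most one clock tick
 * before deferring the skb to the tx_work queue. (HZ=1000 is an
 * assumption for the arithmetic; other HZ values scale accordingly.)
 */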
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	WARN_ON_ONCE(!irqs_disabled());

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
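/* The function above builds the frame back to front: the payload is
 * copied first, then skb_push() prepends the UDP, IP (v4 or v6), and
 * Ethernet headers in turn, so the final layout in the linear data is
 *
 *	[ ethhdr | iphdr or ipv6hdr | udphdr | msg ]
 *
 * which is exactly the headroom find_skb() reserved (total_len - len).
 */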
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
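/* Worked example for the parser above. The option string has the same
 * shape as a netconsole config:
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-mac]
 *
 * e.g. (addresses are illustrative only):
 *
 *	6665@192.168.0.1/eth0,6666@192.168.0.2/00:11:22:33:44:55
 *
 * Any field before a delimiter may be left empty to keep its default.
 */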
int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name) {
		struct net *net = current->nsproxy->net_ns;
		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev);
		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */
		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			in_dev = __in_dev_get_rtnl(ndev);

			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
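/* A minimal sketch of in-kernel usage, modeled on what netconsole does
 * (field values are assumptions for illustration):
 *
 *	static struct netpoll np = {
 *		.name        = "netconsole",
 *		.dev_name    = "eth0",
 *		.local_port  = 6665,
 *		.remote_port = 6666,
 *		.remote_mac  = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *	};
 *
 *	netpoll_parse_options(&np, config_string);
 *	if (netpoll_setup(&np) == 0)
 *		netpoll_send_udp(&np, msg, strlen(msg));
 */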
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);

	kfree(npinfo);
}
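/* The purge/cancel/purge/cancel sequence above is deliberate: the first
 * cancel can race with a queue_process() invocation that is already
 * running and may re-queue skbs or re-arm the delayed work, so the
 * queue is purged and the work cancelled a second time once that last
 * run has had its chance to finish.
 */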
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	/* rtnl_dereference would be preferable here but
	 * rcu_cleanup_netpoll path can put us in here safely without
	 * holding the rtnl, so plain rcu_dereference it is
	 */
	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
static void netpoll_async_cleanup(struct work_struct *work)
{
	struct netpoll *np = container_of(work, struct netpoll, cleanup_work);

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();
	kfree(np);
}

void __netpoll_free_async(struct netpoll *np)
{
	schedule_work(&np->cleanup_work);
}
EXPORT_SYMBOL_GPL(__netpoll_free_async);
void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);