/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)
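
/*
 * Worked out, for reference: with the usual header sizes this is
 * 14 (ethhdr) + 20 (iphdr) + 8 (udphdr) + 1460 = 1502 bytes of
 * linear room, enough for one maximally-sized UDP message chunk.
 */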

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

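/*
 * Drain the deferred-transmit queue from process context. If the
 * device's tx queue is frozen or stopped, or the driver rejects the
 * skb, the packet is requeued at the head and the work is retried
 * after another HZ/10.
 */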
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

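/*
 * Verify a received UDP checksum. Per RFC 768 the checksum covers a
 * pseudo-header of {saddr, daddr, protocol, length} in addition to
 * the UDP header and payload; csum_tcpudp_nofold() folds in the
 * pseudo-header part. A checksum of zero on the wire means the
 * sender did not compute one, so there is nothing to verify.
 */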
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * Where communication is bi-directional, reading only one message
 * at a time can lead to packets being dropped by the network
 * adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to a value greater than one.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}

static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	if (dev->flags & IFF_SLAVE) {
		if (dev->npinfo) {
			struct net_device *bond_dev = dev->master;
			struct sk_buff *skb;
			while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
			}
		}
	}

	service_arp_queue(dev->npinfo);

	zap_completion_queue();
}

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

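/*
 * Reap this CPU's completion queue. Skbs without a destructor are
 * freed on the spot; skbs that carry a destructor are handed back
 * via dev_kfree_skb_any() (their user count is bumped first, since
 * queued entries hold none) so the destructor runs in a context
 * where that is safe.
 */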
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

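/*
 * Get a transmit skb: try a fresh atomic allocation first, then fall
 * back to the preallocated pool, and as a last resort poll the device
 * a few times to flush out completions before giving up.
 */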
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

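/*
 * Transmit the skb immediately when nothing is queued ahead of it and
 * we can take the tx queue lock; otherwise defer it to queue_process()
 * so that messages never go out of order.
 */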
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;
		unsigned long flags;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {
					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle the device; maybe there is some cleanup to reap */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			"netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
			dev->name, ops->ndo_start_xmit);

		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);

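/*
 * Build and transmit one UDP message. The headers are pushed on in
 * reverse order (UDP, then IP, then Ethernet), so the finished frame
 * is laid out front to back as:
 *
 *	[ ethhdr | iphdr | udphdr | payload ]
 */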
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip,
					np->remote_ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(np->local_ip, &(iph->saddr));
	put_unaligned(np->remote_ip, &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

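/*
 * Answer ARP requests aimed at one of our netpoll clients. Per
 * RFC 826, the ARP body following the arphdr is laid out as:
 *
 *	[ sender hw addr | sender IP | target hw addr | target IP ]
 *
 * For the reply we swap roles: our hardware address and the requested
 * IP go into the sender slots, the requester's into the target slots.
 */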
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hlen, tlen;
	int hits = 0;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before parsing the packet, do a quick pass to see
	   whether it is of any interest to us at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* If we actually cared about dst hw addr,
	   it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (tip != np->local_ip)
			continue;

		hlen = LL_RESERVED_SPACE(np->dev);
		tlen = np->dev->needed_tailroom;
		send_skb = find_skb(np, size + hlen + tlen, hlen);
		if (!send_skb)
			continue;

		skb_reset_network_header(send_skb);
		arp = (struct arphdr *) skb_put(send_skb, size);
		send_skb->dev = skb->dev;
		send_skb->protocol = htons(ETH_P_ARP);

		/* Fill the device header for the ARP frame */
		if (dev_hard_header(send_skb, skb->dev, ptype,
				    sha, np->dev->dev_addr,
				    send_skb->len) < 0) {
			kfree_skb(send_skb);
			continue;
		}

		/*
		 * Fill out the arp protocol part.
		 *
		 * we only support ethernet device type,
		 * which (according to RFC 1390) should
		 * always equal 1 (Ethernet).
		 */

		arp->ar_hrd = htons(np->dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		arp->ar_hln = np->dev->addr_len;
		arp->ar_pln = 4;
		arp->ar_op = htons(type);

		arp_ptr = (unsigned char *)(arp + 1);
		memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &tip, 4);
		arp_ptr += 4;
		memcpy(arp_ptr, sha, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &sip, 4);

		netpoll_send_skb(np, send_skb);

		/* If there are several rx_hooks for the same address,
		   we're fine by sending a single reply */
		break;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}

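/*
 * Hook called from the normal receive path. Returns 1 if the packet
 * was consumed by netpoll (it matched a registered rx_hook, or was
 * dropped because we are trapped) and 0 if regular stack processing
 * should continue.
 */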
int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	int hits = 0;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;

	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->local_ip && np->local_ip != iph->daddr)
			continue;
		if (np->remote_ip && np->remote_ip != iph->saddr)
			continue;
		if (np->local_port && np->local_port != ntohs(uh->dest))
			continue;

		np->rx_hook(np, ntohs(uh->source),
			       (char *)(uh+1),
			       ulen - sizeof(struct udphdr));
		hits++;
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	np_info(np, "local IP %pI4\n", &np->local_ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	np_info(np, "remote IP %pI4\n", &np->remote_ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

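/*
 * Parse a configuration string of the form
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@[tgt-ip]/[tgt-mac]
 *
 * e.g. "6665@192.168.0.1/eth0,6666@192.168.0.2/00:11:22:33:44:55".
 * Fields left empty keep whatever value the caller preset.
 */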
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

int __netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = np->dev;
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		return -ENODEV;
	}

	if (ndev->master) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			np_err(np, "no IP address for %s, aborting\n",
			       np->dev_name);
			err = -EDESTADDRREQ;
			goto put;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		np_info(np, "local IP %pI4\n", &np->local_ip);
	}

	np->dev = ndev;

	/* fill up the skb queue */
	refill_skbs();

	rtnl_lock();
	err = __netpoll_setup(np);
	rtnl_unlock();

	if (err)
		goto put;

	return 0;

put:
	dev_put(ndev);
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
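
/*
 * Minimal usage sketch (illustrative only; the instance and values
 * below are hypothetical, in the style of a netconsole-like client):
 *
 *	static struct netpoll np = {
 *		.name        = "myconsole",
 *		.dev_name    = "eth0",
 *		.local_port  = 6665,
 *		.remote_port = 6666,
 *		.remote_mac  = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *	};
 *
 *	if (netpoll_parse_options(&np, config) == 0 &&
 *	    netpoll_setup(&np) == 0)
 *		netpoll_send_udp(&np, msg, strlen(msg));
 */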

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);

		/* avoid racing with NAPI reading npinfo */
		synchronize_rcu_bh();

		skb_queue_purge(&npinfo->arp_tx);
		skb_queue_purge(&npinfo->txq);
		cancel_delayed_work_sync(&npinfo->tx_work);

		/* clean after last, unfinished work */
		__skb_queue_purge(&npinfo->txq);
		kfree(npinfo);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);