net/core/netpoll.c
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL   50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE                                                    \
        (sizeof(struct ethhdr) +                                        \
         sizeof(struct iphdr) +                                         \
         sizeof(struct udphdr) +                                        \
         MAX_UDP_CHUNK)

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)                           \
        pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)                            \
        pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)                         \
        pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

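/*
 * Flush the netpoll tx queue from process context.  If the device is
 * no longer usable the skb is dropped; if the tx queue is frozen or
 * stopped, or the driver rejects the skb, it is requeued at the head
 * and we retry in HZ/10.
 */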
static void queue_process(struct work_struct *work)
{
        struct netpoll_info *npinfo =
                container_of(work, struct netpoll_info, tx_work.work);
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = skb_dequeue(&npinfo->txq))) {
                struct net_device *dev = skb->dev;
                const struct net_device_ops *ops = dev->netdev_ops;
                struct netdev_queue *txq;

                if (!netif_device_present(dev) || !netif_running(dev)) {
                        __kfree_skb(skb);
                        continue;
                }

                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

                local_irq_save(flags);
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_xmit_frozen_or_stopped(txq) ||
                    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
                        skb_queue_head(&npinfo->txq, skb);
                        __netif_tx_unlock(txq);
                        local_irq_restore(flags);

                        schedule_delayed_work(&npinfo->tx_work, HZ/10);
                        return;
                }
                __netif_tx_unlock(txq);
                local_irq_restore(flags);
        }
}

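/*
 * Verify the UDP checksum of an incoming packet.  Returns 0 when the
 * checksum is valid or does not need checking, non-zero otherwise.
 */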
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
                            unsigned short ulen, __be32 saddr, __be32 daddr)
{
        __wsum psum;

        if (uh->check == 0 || skb_csum_unnecessary(skb))
                return 0;

        psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

        if (skb->ip_summed == CHECKSUM_COMPLETE &&
            !csum_fold(csum_add(psum, skb->csum)))
                return 0;

        skb->csum = psum;

        return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to be greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
                         struct napi_struct *napi, int budget)
{
        int work;

        /* net_rx_action's ->poll() invocations and ours are
         * synchronized by this test which is only made while
         * holding the napi->poll_lock.
         */
        if (!test_bit(NAPI_STATE_SCHED, &napi->state))
                return budget;

        npinfo->rx_flags |= NETPOLL_RX_DROP;
        atomic_inc(&trapped);
        set_bit(NAPI_STATE_NPSVC, &napi->state);

        work = napi->poll(napi, budget);
        trace_napi_poll(napi);

        clear_bit(NAPI_STATE_NPSVC, &napi->state);
        atomic_dec(&trapped);
        npinfo->rx_flags &= ~NETPOLL_RX_DROP;

        return budget - work;
}

static void poll_napi(struct net_device *dev)
{
        struct napi_struct *napi;
        int budget = 16;

        list_for_each_entry(napi, &dev->napi_list, dev_list) {
                if (napi->poll_owner != smp_processor_id() &&
                    spin_trylock(&napi->poll_lock)) {
                        budget = poll_one_napi(dev->npinfo, napi, budget);
                        spin_unlock(&napi->poll_lock);

                        if (!budget)
                                break;
                }
        }
}

static void service_arp_queue(struct netpoll_info *npi)
{
        if (npi) {
                struct sk_buff *skb;

                while ((skb = skb_dequeue(&npi->arp_tx)))
                        arp_reply(skb);
        }
}

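/*
 * Drive the device without relying on interrupts: ask the driver to
 * process pending work via ->ndo_poll_controller, pump any NAPI
 * contexts, hand ARP traffic queued on a bonding slave over to its
 * master, answer queued ARP requests and reap the completion queue.
 */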
static void netpoll_poll_dev(struct net_device *dev)
{
        const struct net_device_ops *ops;

        if (!dev || !netif_running(dev))
                return;

        ops = dev->netdev_ops;
        if (!ops->ndo_poll_controller)
                return;

        /* Process pending work on NIC */
        ops->ndo_poll_controller(dev);

        poll_napi(dev);

        if (dev->flags & IFF_SLAVE) {
                if (dev->npinfo) {
                        struct net_device *bond_dev = dev->master;
                        struct sk_buff *skb;
                        while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
                                skb->dev = bond_dev;
                                skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
                        }
                }
        }

        service_arp_queue(dev->npinfo);

        zap_completion_queue();
}

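/* Top up the static skb pool to MAX_SKBS preallocated buffers. */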
static void refill_skbs(void)
{
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&skb_pool.lock, flags);
        while (skb_pool.qlen < MAX_SKBS) {
                skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                if (!skb)
                        break;

                __skb_queue_tail(&skb_pool, skb);
        }
        spin_unlock_irqrestore(&skb_pool.lock, flags);
}

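/*
 * Reap this CPU's completion queue so freed skbs become available
 * again.  skbs with a destructor cannot be destructed here, so they
 * are handed back via dev_kfree_skb_any(); the rest are freed
 * directly.
 */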
static void zap_completion_queue(void)
{
        unsigned long flags;
        struct softnet_data *sd = &get_cpu_var(softnet_data);

        if (sd->completion_queue) {
                struct sk_buff *clist;

                local_irq_save(flags);
                clist = sd->completion_queue;
                sd->completion_queue = NULL;
                local_irq_restore(flags);

                while (clist != NULL) {
                        struct sk_buff *skb = clist;
                        clist = clist->next;
                        if (skb->destructor) {
                                atomic_inc(&skb->users);
                                dev_kfree_skb_any(skb); /* put this one back */
                        } else {
                                __kfree_skb(skb);
                        }
                }
        }

        put_cpu_var(softnet_data);
}

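/*
 * Allocate an skb for transmission, falling back to the preallocated
 * pool.  If both fail, poll the device to reclaim buffers and retry,
 * up to ten times, before giving up.
 */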
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
        int count = 0;
        struct sk_buff *skb;

        zap_completion_queue();
        refill_skbs();
repeat:
        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                skb = skb_dequeue(&skb_pool);

        if (!skb) {
                if (++count < 10) {
                        netpoll_poll_dev(np->dev);
                        goto repeat;
                }
                return NULL;
        }

        atomic_set(&skb->users, 1);
        skb_reserve(skb, reserve);
        return skb;
}

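/*
 * Return non-zero if the current CPU is already inside ->poll() for
 * one of this device's NAPI contexts, in which case transmitting from
 * here could recurse.
 */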
static int netpoll_owner_active(struct net_device *dev)
{
        struct napi_struct *napi;

        list_for_each_entry(napi, &dev->napi_list, dev_list) {
                if (napi->poll_owner == smp_processor_id())
                        return 1;
        }
        return 0;
}

void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                             struct net_device *dev)
{
        int status = NETDEV_TX_BUSY;
        unsigned long tries;
        const struct net_device_ops *ops = dev->netdev_ops;
        /* It is up to the caller to keep npinfo alive. */
        struct netpoll_info *npinfo = np->dev->npinfo;

        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
                __kfree_skb(skb);
                return;
        }

        /* don't get messages out of order, and no recursion */
        if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                struct netdev_queue *txq;
                unsigned long flags;

                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

                local_irq_save(flags);
                /* try until next clock tick */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                     tries > 0; --tries) {
                        if (__netif_tx_trylock(txq)) {
                                if (!netif_xmit_stopped(txq)) {
                                        status = ops->ndo_start_xmit(skb, dev);
                                        if (status == NETDEV_TX_OK)
                                                txq_trans_update(txq);
                                }
                                __netif_tx_unlock(txq);

                                if (status == NETDEV_TX_OK)
                                        break;
                        }

                        /* tickle the device; maybe there is some cleanup to do */
                        netpoll_poll_dev(np->dev);

                        udelay(USEC_PER_POLL);
                }

                WARN_ONCE(!irqs_disabled(),
                        "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
                        dev->name, ops->ndo_start_xmit);

                local_irq_restore(flags);
        }

        if (status != NETDEV_TX_OK) {
                skb_queue_tail(&npinfo->txq, skb);
                schedule_delayed_work(&npinfo->tx_work, 0);
        }
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
        int total_len, ip_len, udp_len;
        struct sk_buff *skb;
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;

        udp_len = len + sizeof(*udph);
        ip_len = udp_len + sizeof(*iph);
        total_len = ip_len + LL_RESERVED_SPACE(np->dev);

        skb = find_skb(np, total_len + np->dev->needed_tailroom,
                       total_len - len);
        if (!skb)
                return;

        skb_copy_to_linear_data(skb, msg, len);
        skb_put(skb, len);

        skb_push(skb, sizeof(*udph));
        skb_reset_transport_header(skb);
        udph = udp_hdr(skb);
        udph->source = htons(np->local_port);
        udph->dest = htons(np->remote_port);
        udph->len = htons(udp_len);
        udph->check = 0;
        udph->check = csum_tcpudp_magic(np->local_ip,
                                        np->remote_ip,
                                        udp_len, IPPROTO_UDP,
                                        csum_partial(udph, udp_len, 0));
        if (udph->check == 0)
                udph->check = CSUM_MANGLED_0;

        skb_push(skb, sizeof(*iph));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);

        /* iph->version = 4; iph->ihl = 5; */
        put_unaligned(0x45, (unsigned char *)iph);
        iph->tos      = 0;
        put_unaligned(htons(ip_len), &(iph->tot_len));
        iph->id       = 0;
        iph->frag_off = 0;
        iph->ttl      = 64;
        iph->protocol = IPPROTO_UDP;
        iph->check    = 0;
        put_unaligned(np->local_ip, &(iph->saddr));
        put_unaligned(np->remote_ip, &(iph->daddr));
        iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

        eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        skb->protocol = eth->h_proto = htons(ETH_P_IP);
        memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
        memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

        skb->dev = np->dev;

        netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
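
/*
 * Minimal usage sketch (hypothetical caller, modelled on what a client
 * such as netconsole does; the names and addresses below are
 * illustrative only):
 *
 *      static struct netpoll np = {
 *              .name        = "example",
 *              .dev_name    = "eth0",
 *              .local_port  = 6665,
 *              .remote_port = 6666,
 *      };
 *
 *      np.remote_ip = in_aton("192.168.0.2");
 *      memcpy(np.remote_mac, remote_mac, ETH_ALEN);
 *      if (!netpoll_setup(&np))
 *              netpoll_send_udp(&np, msg, strlen(msg));
 */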

/*
 * Answer an incoming ARP request on behalf of a netpoll-managed local
 * IP address, building the reply by hand since the normal stack may
 * not be running.
 */
static void arp_reply(struct sk_buff *skb)
{
        struct netpoll_info *npinfo = skb->dev->npinfo;
        struct arphdr *arp;
        unsigned char *arp_ptr;
        int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
        __be32 sip, tip;
        unsigned char *sha;
        struct sk_buff *send_skb;
        struct netpoll *np, *tmp;
        unsigned long flags;
        int hlen, tlen;
        int hits = 0;

        if (list_empty(&npinfo->rx_np))
                return;

        /* Before checking the packet, do some early
           inspection to see whether this is interesting at all */
        spin_lock_irqsave(&npinfo->rx_lock, flags);
        list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
                if (np->dev == skb->dev)
                        hits++;
        }
        spin_unlock_irqrestore(&npinfo->rx_lock, flags);

        /* No netpoll struct is using this dev */
        if (!hits)
                return;

        /* No arp on this interface */
        if (skb->dev->flags & IFF_NOARP)
                return;

        if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
                return;

        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        arp = arp_hdr(skb);

        if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
             arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
            arp->ar_pro != htons(ETH_P_IP) ||
            arp->ar_op != htons(ARPOP_REQUEST))
                return;

        arp_ptr = (unsigned char *)(arp+1);
        /* save the location of the src hw addr */
        sha = arp_ptr;
        arp_ptr += skb->dev->addr_len;
        memcpy(&sip, arp_ptr, 4);
        arp_ptr += 4;
        /* If we actually cared about dst hw addr,
           it would get copied here */
        arp_ptr += skb->dev->addr_len;
        memcpy(&tip, arp_ptr, 4);

        /* Should we ignore arp? */
        if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
                return;

        size = arp_hdr_len(skb->dev);

        spin_lock_irqsave(&npinfo->rx_lock, flags);
        list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
                if (tip != np->local_ip)
                        continue;

                hlen = LL_RESERVED_SPACE(np->dev);
                tlen = np->dev->needed_tailroom;
                send_skb = find_skb(np, size + hlen + tlen, hlen);
                if (!send_skb)
                        continue;

                skb_reset_network_header(send_skb);
                arp = (struct arphdr *) skb_put(send_skb, size);
                send_skb->dev = skb->dev;
                send_skb->protocol = htons(ETH_P_ARP);

                /* Fill the device header for the ARP frame */
                if (dev_hard_header(send_skb, skb->dev, ptype,
                                    sha, np->dev->dev_addr,
                                    send_skb->len) < 0) {
                        kfree_skb(send_skb);
                        continue;
                }

                /*
                 * Fill out the arp protocol part.
                 *
                 * We only support ethernet device types,
                 * which (according to RFC 1390) should
                 * always equal 1 (Ethernet).
                 */

                arp->ar_hrd = htons(np->dev->type);
                arp->ar_pro = htons(ETH_P_IP);
                arp->ar_hln = np->dev->addr_len;
                arp->ar_pln = 4;
                arp->ar_op = htons(type);

                arp_ptr = (unsigned char *)(arp + 1);
                memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
                arp_ptr += np->dev->addr_len;
                memcpy(arp_ptr, &tip, 4);
                arp_ptr += 4;
                memcpy(arp_ptr, sha, np->dev->addr_len);
                arp_ptr += np->dev->addr_len;
                memcpy(arp_ptr, &sip, 4);

                netpoll_send_skb(np, send_skb);

                /* If there are several rx_hooks for the same address,
                   we're fine sending a single reply */
                break;
        }
        spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}

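/*
 * Called from the receive path while netpoll is active.  Returns 1 if
 * the packet was consumed by netpoll (delivered to an rx_hook, queued
 * for ARP handling, or dropped while trapped), 0 if the normal stack
 * should process it.
 */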
int __netpoll_rx(struct sk_buff *skb)
{
        int proto, len, ulen;
        int hits = 0;
        const struct iphdr *iph;
        struct udphdr *uh;
        struct netpoll_info *npinfo = skb->dev->npinfo;
        struct netpoll *np, *tmp;

        if (list_empty(&npinfo->rx_np))
                goto out;

        if (skb->dev->type != ARPHRD_ETHER)
                goto out;

        /* check if netpoll clients need ARP */
        if (skb->protocol == htons(ETH_P_ARP) &&
            atomic_read(&trapped)) {
                skb_queue_tail(&npinfo->arp_tx, skb);
                return 1;
        }

        proto = ntohs(eth_hdr(skb)->h_proto);
        if (proto != ETH_P_IP)
                goto out;
        if (skb->pkt_type == PACKET_OTHERHOST)
                goto out;
        if (skb_shared(skb))
                goto out;

        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto out;
        iph = (struct iphdr *)skb->data;
        if (iph->ihl < 5 || iph->version != 4)
                goto out;
        if (!pskb_may_pull(skb, iph->ihl*4))
                goto out;
        iph = (struct iphdr *)skb->data;
        if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
                goto out;

        len = ntohs(iph->tot_len);
        if (skb->len < len || len < iph->ihl*4)
                goto out;

        /*
         * Our transport medium may have padded the buffer out.
         * Now we trim to the true length of the frame.
         */
        if (pskb_trim_rcsum(skb, len))
                goto out;

        iph = (struct iphdr *)skb->data;
        if (iph->protocol != IPPROTO_UDP)
                goto out;

        len -= iph->ihl*4;
        uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
        ulen = ntohs(uh->len);

        if (ulen != len)
                goto out;
        if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
                goto out;

        list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
                if (np->local_ip && np->local_ip != iph->daddr)
                        continue;
                if (np->remote_ip && np->remote_ip != iph->saddr)
                        continue;
                if (np->local_port && np->local_port != ntohs(uh->dest))
                        continue;

                np->rx_hook(np, ntohs(uh->source),
                               (char *)(uh+1),
                               ulen - sizeof(struct udphdr));
                hits++;
        }

        if (!hits)
                goto out;

        kfree_skb(skb);
        return 1;

out:
        if (atomic_read(&trapped)) {
                kfree_skb(skb);
                return 1;
        }

        return 0;
}

void netpoll_print_options(struct netpoll *np)
{
        np_info(np, "local port %d\n", np->local_port);
        np_info(np, "local IP %pI4\n", &np->local_ip);
        np_info(np, "interface '%s'\n", np->dev_name);
        np_info(np, "remote port %d\n", np->remote_port);
        np_info(np, "remote IP %pI4\n", &np->remote_ip);
        np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

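/*
 * Parse a netconsole-style option string:
 *
 *      [local-port]@[local-ip]/[dev],[remote-port]@<remote-ip>/[remote-mac]
 *
 * e.g. "6665@192.168.0.1/eth0,6666@192.168.0.2/00:11:22:33:44:55".
 * Empty fields keep their defaults.  Returns 0 on success.
 */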
int netpoll_parse_options(struct netpoll *np, char *opt)
{
        char *cur = opt, *delim;

        if (*cur != '@') {
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->local_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
        cur++;

        if (*cur != '/') {
                if ((delim = strchr(cur, '/')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->local_ip = in_aton(cur);
                cur = delim;
        }
        cur++;

        if (*cur != ',') {
                /* parse out dev name */
                if ((delim = strchr(cur, ',')) == NULL)
                        goto parse_failed;
                *delim = 0;
                strlcpy(np->dev_name, cur, sizeof(np->dev_name));
                cur = delim;
        }
        cur++;

        if (*cur != '@') {
                /* dst port */
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                if (*cur == ' ' || *cur == '\t')
                        np_info(np, "warning: whitespace is not allowed\n");
                np->remote_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
        cur++;

        /* dst ip */
        if ((delim = strchr(cur, '/')) == NULL)
                goto parse_failed;
        *delim = 0;
        np->remote_ip = in_aton(cur);
        cur = delim + 1;

        if (*cur != 0) {
                /* MAC address */
                if (!mac_pton(cur, np->remote_mac))
                        goto parse_failed;
        }

        netpoll_print_options(np);

        return 0;

 parse_failed:
        np_info(np, "couldn't parse config at '%s'!\n", cur);
        return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

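/*
 * Attach netpoll to an already-resolved device.  Expects the caller to
 * hold the rtnl lock and to have validated the device; allocates (or
 * reuses) the per-device netpoll_info and registers any rx_hook.
 */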
int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
        struct netpoll_info *npinfo;
        const struct net_device_ops *ops;
        unsigned long flags;
        int err;

        np->dev = ndev;
        strlcpy(np->dev_name, ndev->name, IFNAMSIZ);

        if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
            !ndev->netdev_ops->ndo_poll_controller) {
                np_err(np, "%s doesn't support polling, aborting\n",
                       np->dev_name);
                err = -ENOTSUPP;
                goto out;
        }

        if (!ndev->npinfo) {
                npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
                if (!npinfo) {
                        err = -ENOMEM;
                        goto out;
                }

                npinfo->rx_flags = 0;
                INIT_LIST_HEAD(&npinfo->rx_np);

                spin_lock_init(&npinfo->rx_lock);
                skb_queue_head_init(&npinfo->arp_tx);
                skb_queue_head_init(&npinfo->txq);
                INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

                atomic_set(&npinfo->refcnt, 1);

                ops = np->dev->netdev_ops;
                if (ops->ndo_netpoll_setup) {
                        err = ops->ndo_netpoll_setup(ndev, npinfo);
                        if (err)
                                goto free_npinfo;
                }
        } else {
                npinfo = ndev->npinfo;
                atomic_inc(&npinfo->refcnt);
        }

        npinfo->netpoll = np;

        if (np->rx_hook) {
                spin_lock_irqsave(&npinfo->rx_lock, flags);
                npinfo->rx_flags |= NETPOLL_RX_ENABLED;
                list_add_tail(&np->rx, &npinfo->rx_np);
                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
        }

        /* last thing to do is link it to the net device structure */
        rcu_assign_pointer(ndev->npinfo, npinfo);

        return 0;

free_npinfo:
        kfree(npinfo);
out:
        return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
        struct net_device *ndev = NULL;
        struct in_device *in_dev;
        int err;

        /* dev_name is an array; test for a non-empty name */
        if (np->dev_name[0])
                ndev = dev_get_by_name(&init_net, np->dev_name);
        if (!ndev) {
                np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
                return -ENODEV;
        }

        if (ndev->master) {
                np_err(np, "%s is a slave device, aborting\n", np->dev_name);
                err = -EBUSY;
                goto put;
        }

        if (!netif_running(ndev)) {
                unsigned long atmost, atleast;

                np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

                rtnl_lock();
                err = dev_open(ndev);
                rtnl_unlock();

                if (err) {
                        np_err(np, "failed to open %s\n", ndev->name);
                        goto put;
                }

                atleast = jiffies + HZ/10;
                atmost = jiffies + carrier_timeout * HZ;
                while (!netif_carrier_ok(ndev)) {
                        if (time_after(jiffies, atmost)) {
                                np_notice(np, "timeout waiting for carrier\n");
                                break;
                        }
                        msleep(1);
                }

                /* If carrier appears to come up instantly, we don't
                 * trust it and pause so that we don't pump all our
                 * queued console messages into the bitbucket.
                 */
                if (time_before(jiffies, atleast)) {
                        np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
                        msleep(4000);
                }
        }

        if (!np->local_ip) {
                rcu_read_lock();
                in_dev = __in_dev_get_rcu(ndev);

                if (!in_dev || !in_dev->ifa_list) {
                        rcu_read_unlock();
                        np_err(np, "no IP address for %s, aborting\n",
                               np->dev_name);
                        err = -EDESTADDRREQ;
                        goto put;
                }

                np->local_ip = in_dev->ifa_list->ifa_local;
                rcu_read_unlock();
                np_info(np, "local IP %pI4\n", &np->local_ip);
        }

        /* fill up the skb queue */
        refill_skbs();

        rtnl_lock();
        err = __netpoll_setup(np, ndev);
        rtnl_unlock();

        if (err)
                goto put;

        return 0;

put:
        dev_put(ndev);
        return err;
}
EXPORT_SYMBOL(netpoll_setup);

static int __init netpoll_init(void)
{
        skb_queue_head_init(&skb_pool);
        return 0;
}
core_initcall(netpoll_init);

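/*
 * Detach np from its device.  The caller must hold the rtnl lock.  The
 * last reference tears down the per-device netpoll_info, after waiting
 * for any NAPI path still reading it under rcu_read_lock_bh().
 */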
void __netpoll_cleanup(struct netpoll *np)
{
        struct netpoll_info *npinfo;
        unsigned long flags;

        npinfo = np->dev->npinfo;
        if (!npinfo)
                return;

        if (!list_empty(&npinfo->rx_np)) {
                spin_lock_irqsave(&npinfo->rx_lock, flags);
                list_del(&np->rx);
                if (list_empty(&npinfo->rx_np))
                        npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
        }

        if (atomic_dec_and_test(&npinfo->refcnt)) {
                const struct net_device_ops *ops;

                ops = np->dev->netdev_ops;
                if (ops->ndo_netpoll_cleanup)
                        ops->ndo_netpoll_cleanup(np->dev);

                RCU_INIT_POINTER(np->dev->npinfo, NULL);

                /* avoid racing with NAPI reading npinfo */
                synchronize_rcu_bh();

                skb_queue_purge(&npinfo->arp_tx);
                skb_queue_purge(&npinfo->txq);
                cancel_delayed_work_sync(&npinfo->tx_work);

                /* clean up after the last, possibly unfinished, run of queue_process */
                __skb_queue_purge(&npinfo->txq);
                kfree(npinfo);
        }
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void netpoll_cleanup(struct netpoll *np)
{
        if (!np->dev)
                return;

        rtnl_lock();
        __netpoll_cleanup(np);
        rtnl_unlock();

        dev_put(np->dev);
        np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);

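/*
 * The global "trapped" count makes __netpoll_rx() steal all incoming
 * packets while it is non-zero.  netpoll_trap() reports the current
 * state; netpoll_set_trap() raises or lowers the count.
 */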
int netpoll_trap(void)
{
        return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
        if (trap)
                atomic_inc(&trapped);
        else
                atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);