platform/kernel/linux-starfive.git: drivers/net/xen-netfront.c
1 /*
2  * Virtual network driver for conversing with remote driver backends.
3  *
4  * Copyright (c) 2002-2005, K A Fraser
5  * Copyright (c) 2005, XenSource Ltd
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License version 2
9  * as published by the Free Software Foundation; or, when distributed
10  * separately from the Linux kernel or incorporated into other
11  * software packages, subject to the following license:
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a copy
14  * of this source file (the "Software"), to deal in the Software without
15  * restriction, including without limitation the rights to use, copy, modify,
16  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17  * and to permit persons to whom the Software is furnished to do so, subject to
18  * the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29  * IN THE SOFTWARE.
30  */
31
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34 #include <linux/module.h>
35 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/ethtool.h>
40 #include <linux/if_ether.h>
41 #include <net/tcp.h>
42 #include <linux/udp.h>
43 #include <linux/moduleparam.h>
44 #include <linux/mm.h>
45 #include <linux/slab.h>
46 #include <net/ip.h>
47 #include <linux/bpf.h>
48 #include <net/page_pool.h>
49 #include <linux/bpf_trace.h>
50
51 #include <xen/xen.h>
52 #include <xen/xenbus.h>
53 #include <xen/events.h>
54 #include <xen/page.h>
55 #include <xen/platform_pci.h>
56 #include <xen/grant_table.h>
57
58 #include <xen/interface/io/netif.h>
59 #include <xen/interface/memory.h>
60 #include <xen/interface/grant_table.h>
61
62 /* Module parameters */
63 #define MAX_QUEUES_DEFAULT 8
64 static unsigned int xennet_max_queues;
65 module_param_named(max_queues, xennet_max_queues, uint, 0644);
66 MODULE_PARM_DESC(max_queues,
67                  "Maximum number of queues per virtual interface");
68
69 static bool __read_mostly xennet_trusted = true;
70 module_param_named(trusted, xennet_trusted, bool, 0644);
71 MODULE_PARM_DESC(trusted, "Is the backend trusted");
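/*
 * Both parameters are registered with 0644 permissions, so they can also
 * be inspected and changed at runtime through
 * /sys/module/xen_netfront/parameters/{max_queues,trusted}.
 */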
72
73 #define XENNET_TIMEOUT  (5 * HZ)
74
75 static const struct ethtool_ops xennet_ethtool_ops;
76
77 struct netfront_cb {
78         int pull_to;
79 };
80
81 #define NETFRONT_SKB_CB(skb)    ((struct netfront_cb *)((skb)->cb))
82
83 #define RX_COPY_THRESHOLD 256
84
85 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
86 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
87
88 /* Minimum number of Rx slots (includes slot for GSO metadata). */
89 #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
90
91 /* Queue name is interface name with "-qNNN" appended */
92 #define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
93
94 /* IRQ name is queue name with "-tx" or "-rx" appended */
95 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
96
97 static DECLARE_WAIT_QUEUE_HEAD(module_wq);
98
99 struct netfront_stats {
100         u64                     packets;
101         u64                     bytes;
102         struct u64_stats_sync   syncp;
103 };
104
105 struct netfront_info;
106
107 struct netfront_queue {
108         unsigned int id; /* Queue ID, 0-based */
109         char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
110         struct netfront_info *info;
111
112         struct bpf_prog __rcu *xdp_prog;
113
114         struct napi_struct napi;
115
116         /* Split event channels support, tx_* == rx_* when using
117          * single event channel.
118          */
119         unsigned int tx_evtchn, rx_evtchn;
120         unsigned int tx_irq, rx_irq;
121         /* Only used when split event channels support is enabled */
122         char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
123         char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
124
125         spinlock_t   tx_lock;
126         struct xen_netif_tx_front_ring tx;
127         int tx_ring_ref;
128
129         /*
130          * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
131          * are linked from tx_skb_freelist through tx_link.
132          */
133         struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
134         unsigned short tx_link[NET_TX_RING_SIZE];
135 #define TX_LINK_NONE 0xffff
136 #define TX_PENDING   0xfffe
137         grant_ref_t gref_tx_head;
138         grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
139         struct page *grant_tx_page[NET_TX_RING_SIZE];
140         unsigned tx_skb_freelist;
141         unsigned int tx_pend_queue;
142
143         spinlock_t   rx_lock ____cacheline_aligned_in_smp;
144         struct xen_netif_rx_front_ring rx;
145         int rx_ring_ref;
146
147         struct timer_list rx_refill_timer;
148
149         struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
150         grant_ref_t gref_rx_head;
151         grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
152
153         unsigned int rx_rsp_unconsumed;
154         spinlock_t rx_cons_lock;
155
156         struct page_pool *page_pool;
157         struct xdp_rxq_info xdp_rxq;
158 };
159
160 struct netfront_info {
161         struct list_head list;
162         struct net_device *netdev;
163
164         struct xenbus_device *xbdev;
165
166         /* Multi-queue support */
167         struct netfront_queue *queues;
168
169         /* Statistics */
170         struct netfront_stats __percpu *rx_stats;
171         struct netfront_stats __percpu *tx_stats;
172
173         /* XDP state */
174         bool netback_has_xdp_headroom;
175         bool netfront_xdp_enabled;
176
177         /* Is the device behaving sanely? */
178         bool broken;
179
180         /* Should skbs be bounced into a zeroed buffer? */
181         bool bounce;
182
183         atomic_t rx_gso_checksum_fixup;
184 };
185
186 struct netfront_rx_info {
187         struct xen_netif_rx_response rx;
188         struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
189 };
190
191 /*
192  * Helpers for acquiring and freeing slots in tx_skbs[].
193  */
194
195 static void add_id_to_list(unsigned *head, unsigned short *list,
196                            unsigned short id)
197 {
198         list[id] = *head;
199         *head = id;
200 }
201
202 static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
203 {
204         unsigned int id = *head;
205
206         if (id != TX_LINK_NONE) {
207                 *head = list[id];
208                 list[id] = TX_LINK_NONE;
209         }
210         return id;
211 }
212
213 static int xennet_rxidx(RING_IDX idx)
214 {
215         return idx & (NET_RX_RING_SIZE - 1);
216 }
217
218 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
219                                          RING_IDX ri)
220 {
221         int i = xennet_rxidx(ri);
222         struct sk_buff *skb = queue->rx_skbs[i];
223         queue->rx_skbs[i] = NULL;
224         return skb;
225 }
226
227 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
228                                             RING_IDX ri)
229 {
230         int i = xennet_rxidx(ri);
231         grant_ref_t ref = queue->grant_rx_ref[i];
232         queue->grant_rx_ref[i] = INVALID_GRANT_REF;
233         return ref;
234 }
235
236 #ifdef CONFIG_SYSFS
237 static const struct attribute_group xennet_dev_group;
238 #endif
239
240 static bool xennet_can_sg(struct net_device *dev)
241 {
242         return dev->features & NETIF_F_SG;
243 }
244
245
246 static void rx_refill_timeout(struct timer_list *t)
247 {
248         struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
249         napi_schedule(&queue->napi);
250 }
251
252 static int netfront_tx_slot_available(struct netfront_queue *queue)
253 {
254         return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
255                 (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
256 }
257
258 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
259 {
260         struct net_device *dev = queue->info->netdev;
261         struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
262
263         if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
264             netfront_tx_slot_available(queue) &&
265             likely(netif_running(dev)))
266                 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
267 }
268
269
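/*
 * Allocate one RX buffer: a small skb for the packet head plus a
 * page-pool page attached as frag 0.  The page is granted to the
 * backend later, in xennet_alloc_rx_buffers().  Returns NULL on
 * allocation failure.
 */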
270 static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
271 {
272         struct sk_buff *skb;
273         struct page *page;
274
275         skb = __netdev_alloc_skb(queue->info->netdev,
276                                  RX_COPY_THRESHOLD + NET_IP_ALIGN,
277                                  GFP_ATOMIC | __GFP_NOWARN);
278         if (unlikely(!skb))
279                 return NULL;
280
281         page = page_pool_alloc_pages(queue->page_pool,
282                                      GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
283         if (unlikely(!page)) {
284                 kfree_skb(skb);
285                 return NULL;
286         }
287         skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
288
289         /* Align the IP header to a 16-byte boundary */
290         skb_reserve(skb, NET_IP_ALIGN);
291         skb->dev = queue->info->netdev;
292
293         return skb;
294 }
295
296
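/*
 * Refill the RX ring: allocate buffers, grant each page to the backend
 * and post the requests.  If allocation fails or too few slots could
 * be filled, re-arm rx_refill_timer and retry later; otherwise push
 * the new requests and notify the backend if it is waiting.
 */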
297 static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
298 {
299         RING_IDX req_prod = queue->rx.req_prod_pvt;
300         int notify;
301         int err = 0;
302
303         if (unlikely(!netif_carrier_ok(queue->info->netdev)))
304                 return;
305
306         for (req_prod = queue->rx.req_prod_pvt;
307              req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
308              req_prod++) {
309                 struct sk_buff *skb;
310                 unsigned short id;
311                 grant_ref_t ref;
312                 struct page *page;
313                 struct xen_netif_rx_request *req;
314
315                 skb = xennet_alloc_one_rx_buffer(queue);
316                 if (!skb) {
317                         err = -ENOMEM;
318                         break;
319                 }
320
321                 id = xennet_rxidx(req_prod);
322
323                 BUG_ON(queue->rx_skbs[id]);
324                 queue->rx_skbs[id] = skb;
325
326                 ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
327                 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
328                 queue->grant_rx_ref[id] = ref;
329
330                 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
331
332                 req = RING_GET_REQUEST(&queue->rx, req_prod);
333                 gnttab_page_grant_foreign_access_ref_one(ref,
334                                                          queue->info->xbdev->otherend_id,
335                                                          page,
336                                                          0);
337                 req->id = id;
338                 req->gref = ref;
339         }
340
341         queue->rx.req_prod_pvt = req_prod;
342
343         /* Try again later if there are not enough requests or skb allocation
344          * failed.
345          * "Enough requests" means the sum of newly created slots and the
346          * slots still unconsumed by the backend.
347          */
348         if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
349             unlikely(err)) {
350                 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
351                 return;
352         }
353
354         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
355         if (notify)
356                 notify_remote_via_irq(queue->rx_irq);
357 }
358
359 static int xennet_open(struct net_device *dev)
360 {
361         struct netfront_info *np = netdev_priv(dev);
362         unsigned int num_queues = dev->real_num_tx_queues;
363         unsigned int i = 0;
364         struct netfront_queue *queue = NULL;
365
366         if (!np->queues || np->broken)
367                 return -ENODEV;
368
369         for (i = 0; i < num_queues; ++i) {
370                 queue = &np->queues[i];
371                 napi_enable(&queue->napi);
372
373                 spin_lock_bh(&queue->rx_lock);
374                 if (netif_carrier_ok(dev)) {
375                         xennet_alloc_rx_buffers(queue);
376                         queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
377                         if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
378                                 napi_schedule(&queue->napi);
379                 }
380                 spin_unlock_bh(&queue->rx_lock);
381         }
382
383         netif_tx_start_all_queues(dev);
384
385         return 0;
386 }
387
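/*
 * Garbage-collect completed TX requests: consume the backend's
 * responses, end foreign access on the grants, free the skbs and
 * return the slots to the free list.  Any malformed response marks
 * the device as broken.  Returns true if any work was done.
 */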
388 static bool xennet_tx_buf_gc(struct netfront_queue *queue)
389 {
390         RING_IDX cons, prod;
391         unsigned short id;
392         struct sk_buff *skb;
393         bool more_to_do;
394         bool work_done = false;
395         const struct device *dev = &queue->info->netdev->dev;
396
397         BUG_ON(!netif_carrier_ok(queue->info->netdev));
398
399         do {
400                 prod = queue->tx.sring->rsp_prod;
401                 if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
402                         dev_alert(dev, "Illegal number of responses %u\n",
403                                   prod - queue->tx.rsp_cons);
404                         goto err;
405                 }
406                 rmb(); /* Ensure we see responses up to 'rp'. */
407
408                 for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
409                         struct xen_netif_tx_response txrsp;
410
411                         work_done = true;
412
413                         RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
414                         if (txrsp.status == XEN_NETIF_RSP_NULL)
415                                 continue;
416
417                         id = txrsp.id;
418                         if (id >= RING_SIZE(&queue->tx)) {
419                                 dev_alert(dev,
420                                           "Response has incorrect id (%u)\n",
421                                           id);
422                                 goto err;
423                         }
424                         if (queue->tx_link[id] != TX_PENDING) {
425                                 dev_alert(dev,
426                                           "Response for inactive request\n");
427                                 goto err;
428                         }
429
430                         queue->tx_link[id] = TX_LINK_NONE;
431                         skb = queue->tx_skbs[id];
432                         queue->tx_skbs[id] = NULL;
433                         if (unlikely(!gnttab_end_foreign_access_ref(
434                                 queue->grant_tx_ref[id]))) {
435                                 dev_alert(dev,
436                                           "Grant still in use by backend domain\n");
437                                 goto err;
438                         }
439                         gnttab_release_grant_reference(
440                                 &queue->gref_tx_head, queue->grant_tx_ref[id]);
441                         queue->grant_tx_ref[id] = INVALID_GRANT_REF;
442                         queue->grant_tx_page[id] = NULL;
443                         add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
444                         dev_kfree_skb_irq(skb);
445                 }
446
447                 queue->tx.rsp_cons = prod;
448
449                 RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
450         } while (more_to_do);
451
452         xennet_maybe_wake_tx(queue);
453
454         return work_done;
455
456  err:
457         queue->info->broken = true;
458         dev_alert(dev, "Disabled for further use\n");
459
460         return work_done;
461 }
462
463 struct xennet_gnttab_make_txreq {
464         struct netfront_queue *queue;
465         struct sk_buff *skb;
466         struct page *page;
467         struct xen_netif_tx_request *tx;      /* Last request on ring page */
468         struct xen_netif_tx_request tx_local; /* Local copy of the last request */
469         unsigned int size;
470 };
471
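/*
 * Per-grant callback used when building TX requests: claim a free TX
 * slot and a grant reference, grant the frame read-only to the
 * backend and fill in the ring request.  The slot id is kept on
 * tx_pend_queue until the producer index is pushed.
 */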
472 static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
473                                   unsigned int len, void *data)
474 {
475         struct xennet_gnttab_make_txreq *info = data;
476         unsigned int id;
477         struct xen_netif_tx_request *tx;
478         grant_ref_t ref;
479         /* convenient aliases */
480         struct page *page = info->page;
481         struct netfront_queue *queue = info->queue;
482         struct sk_buff *skb = info->skb;
483
484         id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
485         tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
486         ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
487         WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
488
489         gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
490                                         gfn, GNTMAP_readonly);
491
492         queue->tx_skbs[id] = skb;
493         queue->grant_tx_page[id] = page;
494         queue->grant_tx_ref[id] = ref;
495
496         info->tx_local.id = id;
497         info->tx_local.gref = ref;
498         info->tx_local.offset = offset;
499         info->tx_local.size = len;
500         info->tx_local.flags = 0;
501
502         *tx = info->tx_local;
503
504         /*
505          * Put the request in the pending queue; it will be marked as pending
506          * when the producer index is about to be pushed.
507          */
508         add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
509
510         info->tx = tx;
511         info->size += info->tx_local.size;
512 }
513
514 static struct xen_netif_tx_request *xennet_make_first_txreq(
515         struct xennet_gnttab_make_txreq *info,
516         unsigned int offset, unsigned int len)
517 {
518         info->size = 0;
519
520         gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
521
522         return info->tx;
523 }
524
525 static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
526                                   unsigned int len, void *data)
527 {
528         struct xennet_gnttab_make_txreq *info = data;
529
530         info->tx->flags |= XEN_NETTXF_more_data;
531         skb_get(info->skb);
532         xennet_tx_setup_grant(gfn, offset, len, data);
533 }
534
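/*
 * Emit as many TX requests as needed to cover [offset, offset + len)
 * of @page (which may be a compound page), one grant per backend
 * page, taking an extra skb reference for each additional request.
 */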
535 static void xennet_make_txreqs(
536         struct xennet_gnttab_make_txreq *info,
537         struct page *page,
538         unsigned int offset, unsigned int len)
539 {
540         /* Skip unused frames from start of page */
541         page += offset >> PAGE_SHIFT;
542         offset &= ~PAGE_MASK;
543
544         while (len) {
545                 info->page = page;
546                 info->size = 0;
547
548                 gnttab_foreach_grant_in_range(page, offset, len,
549                                               xennet_make_one_txreq,
550                                               info);
551
552                 page++;
553                 offset = 0;
554                 len -= info->size;
555         }
556 }
557
558 /*
559  * Count how many ring slots are required to send this skb. Each frag
560  * might be a compound page.
561  */
562 static int xennet_count_skb_slots(struct sk_buff *skb)
563 {
564         int i, frags = skb_shinfo(skb)->nr_frags;
565         int slots;
566
567         slots = gnttab_count_grant(offset_in_page(skb->data),
568                                    skb_headlen(skb));
569
570         for (i = 0; i < frags; i++) {
571                 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
572                 unsigned long size = skb_frag_size(frag);
573                 unsigned long offset = skb_frag_off(frag);
574
575                 /* Skip unused frames from start of page */
576                 offset &= ~PAGE_MASK;
577
578                 slots += gnttab_count_grant(offset, size);
579         }
580
581         return slots;
582 }
583
584 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
585                                struct net_device *sb_dev)
586 {
587         unsigned int num_queues = dev->real_num_tx_queues;
588         u32 hash;
589         u16 queue_idx;
590
591         /* First, check if there is only one queue */
592         if (num_queues == 1) {
593                 queue_idx = 0;
594         } else {
595                 hash = skb_get_hash(skb);
596                 queue_idx = hash % num_queues;
597         }
598
599         return queue_idx;
600 }
601
602 static void xennet_mark_tx_pending(struct netfront_queue *queue)
603 {
604         unsigned int i;
605
606         while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
607                TX_LINK_NONE)
608                 queue->tx_link[i] = TX_PENDING;
609 }
610
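/*
 * Transmit a single XDP frame on @queue (tx_lock held by the caller):
 * grant the frame's page to the backend, push the request and notify
 * the backend if needed, then update the TX statistics.
 */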
611 static int xennet_xdp_xmit_one(struct net_device *dev,
612                                struct netfront_queue *queue,
613                                struct xdp_frame *xdpf)
614 {
615         struct netfront_info *np = netdev_priv(dev);
616         struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
617         struct xennet_gnttab_make_txreq info = {
618                 .queue = queue,
619                 .skb = NULL,
620                 .page = virt_to_page(xdpf->data),
621         };
622         int notify;
623
624         xennet_make_first_txreq(&info,
625                                 offset_in_page(xdpf->data),
626                                 xdpf->len);
627
628         xennet_mark_tx_pending(queue);
629
630         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
631         if (notify)
632                 notify_remote_via_irq(queue->tx_irq);
633
634         u64_stats_update_begin(&tx_stats->syncp);
635         tx_stats->bytes += xdpf->len;
636         tx_stats->packets++;
637         u64_stats_update_end(&tx_stats->syncp);
638
639         xennet_tx_buf_gc(queue);
640
641         return 0;
642 }
643
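/*
 * ndo_xdp_xmit handler: transmit up to @n XDP frames on the queue
 * picked from the current CPU.  Returns the number of frames accepted
 * or a negative errno.
 */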
644 static int xennet_xdp_xmit(struct net_device *dev, int n,
645                            struct xdp_frame **frames, u32 flags)
646 {
647         unsigned int num_queues = dev->real_num_tx_queues;
648         struct netfront_info *np = netdev_priv(dev);
649         struct netfront_queue *queue = NULL;
650         unsigned long irq_flags;
651         int nxmit = 0;
652         int i;
653
654         if (unlikely(np->broken))
655                 return -ENODEV;
656         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
657                 return -EINVAL;
658
659         queue = &np->queues[smp_processor_id() % num_queues];
660
661         spin_lock_irqsave(&queue->tx_lock, irq_flags);
662         for (i = 0; i < n; i++) {
663                 struct xdp_frame *xdpf = frames[i];
664
665                 if (!xdpf)
666                         continue;
667                 if (xennet_xdp_xmit_one(dev, queue, xdpf))
668                         break;
669                 nxmit++;
670         }
671         spin_unlock_irqrestore(&queue->tx_lock, irq_flags);
672
673         return nxmit;
674 }
675
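/*
 * Copy an skb into a freshly allocated, zeroed and page-aligned
 * buffer, so that granting the pages to an untrusted backend cannot
 * expose unrelated kernel memory that happens to share them.
 */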
676 static struct sk_buff *bounce_skb(const struct sk_buff *skb)
677 {
678         unsigned int headerlen = skb_headroom(skb);
679         /* Align size to allocate full pages and avoid contiguous data leaks */
680         unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
681                                   XEN_PAGE_SIZE);
682         struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
683
684         if (!n)
685                 return NULL;
686
687         if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
688                 WARN_ONCE(1, "misaligned skb allocated\n");
689                 kfree_skb(n);
690                 return NULL;
691         }
692
693         /* Set the data pointer */
694         skb_reserve(n, headerlen);
695         /* Set the tail pointer and length */
696         skb_put(n, skb->len);
697
698         BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
699
700         skb_copy_header(n, skb);
701         return n;
702 }
703
704 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
705
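/*
 * Main transmit path: map the linear area and all frags into TX ring
 * requests (bouncing the data first if the backend is untrusted or
 * the first slot would be smaller than ETH_HLEN), append GSO metadata
 * when needed, then push the requests and kick the backend.
 */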
706 static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
707 {
708         struct netfront_info *np = netdev_priv(dev);
709         struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
710         struct xen_netif_tx_request *first_tx;
711         unsigned int i;
712         int notify;
713         int slots;
714         struct page *page;
715         unsigned int offset;
716         unsigned int len;
717         unsigned long flags;
718         struct netfront_queue *queue = NULL;
719         struct xennet_gnttab_make_txreq info = { };
720         unsigned int num_queues = dev->real_num_tx_queues;
721         u16 queue_index;
722         struct sk_buff *nskb;
723
724         /* Drop the packet if no queues are set up */
725         if (num_queues < 1)
726                 goto drop;
727         if (unlikely(np->broken))
728                 goto drop;
729         /* Determine which queue to transmit this SKB on */
730         queue_index = skb_get_queue_mapping(skb);
731         queue = &np->queues[queue_index];
732
733         /* If skb->len is too big for wire format, drop skb and alert
734          * user about misconfiguration.
735          */
736         if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
737                 net_alert_ratelimited(
738                         "xennet: skb->len = %u, too big for wire format\n",
739                         skb->len);
740                 goto drop;
741         }
742
743         slots = xennet_count_skb_slots(skb);
744         if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
745                 net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
746                                     slots, skb->len);
747                 if (skb_linearize(skb))
748                         goto drop;
749         }
750
751         page = virt_to_page(skb->data);
752         offset = offset_in_page(skb->data);
753
754         /* The first req should be at least ETH_HLEN size or the packet will be
755          * dropped by netback.
756          *
757          * If the backend is not trusted bounce all data to zeroed pages to
758          * avoid exposing contiguous data on the granted page not belonging to
759          * the skb.
760          */
761         if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
762                 nskb = bounce_skb(skb);
763                 if (!nskb)
764                         goto drop;
765                 dev_consume_skb_any(skb);
766                 skb = nskb;
767                 page = virt_to_page(skb->data);
768                 offset = offset_in_page(skb->data);
769         }
770
771         len = skb_headlen(skb);
772
773         spin_lock_irqsave(&queue->tx_lock, flags);
774
775         if (unlikely(!netif_carrier_ok(dev) ||
776                      (slots > 1 && !xennet_can_sg(dev)) ||
777                      netif_needs_gso(skb, netif_skb_features(skb)))) {
778                 spin_unlock_irqrestore(&queue->tx_lock, flags);
779                 goto drop;
780         }
781
782         /* First request for the linear area. */
783         info.queue = queue;
784         info.skb = skb;
785         info.page = page;
786         first_tx = xennet_make_first_txreq(&info, offset, len);
787         offset += info.tx_local.size;
788         if (offset == PAGE_SIZE) {
789                 page++;
790                 offset = 0;
791         }
792         len -= info.tx_local.size;
793
794         if (skb->ip_summed == CHECKSUM_PARTIAL)
795                 /* local packet? */
796                 first_tx->flags |= XEN_NETTXF_csum_blank |
797                                    XEN_NETTXF_data_validated;
798         else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
799                 /* remote but checksummed. */
800                 first_tx->flags |= XEN_NETTXF_data_validated;
801
802         /* Optional extra info after the first request. */
803         if (skb_shinfo(skb)->gso_size) {
804                 struct xen_netif_extra_info *gso;
805
806                 gso = (struct xen_netif_extra_info *)
807                         RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
808
809                 first_tx->flags |= XEN_NETTXF_extra_info;
810
811                 gso->u.gso.size = skb_shinfo(skb)->gso_size;
812                 gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
813                         XEN_NETIF_GSO_TYPE_TCPV6 :
814                         XEN_NETIF_GSO_TYPE_TCPV4;
815                 gso->u.gso.pad = 0;
816                 gso->u.gso.features = 0;
817
818                 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
819                 gso->flags = 0;
820         }
821
822         /* Requests for the rest of the linear area. */
823         xennet_make_txreqs(&info, page, offset, len);
824
825         /* Requests for all the frags. */
826         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
827                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
828                 xennet_make_txreqs(&info, skb_frag_page(frag),
829                                         skb_frag_off(frag),
830                                         skb_frag_size(frag));
831         }
832
833         /* First request has the packet length. */
834         first_tx->size = skb->len;
835
836         /* timestamp packet in software */
837         skb_tx_timestamp(skb);
838
839         xennet_mark_tx_pending(queue);
840
841         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
842         if (notify)
843                 notify_remote_via_irq(queue->tx_irq);
844
845         u64_stats_update_begin(&tx_stats->syncp);
846         tx_stats->bytes += skb->len;
847         tx_stats->packets++;
848         u64_stats_update_end(&tx_stats->syncp);
849
850         /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
851         xennet_tx_buf_gc(queue);
852
853         if (!netfront_tx_slot_available(queue))
854                 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
855
856         spin_unlock_irqrestore(&queue->tx_lock, flags);
857
858         return NETDEV_TX_OK;
859
860  drop:
861         dev->stats.tx_dropped++;
862         dev_kfree_skb_any(skb);
863         return NETDEV_TX_OK;
864 }
865
866 static int xennet_close(struct net_device *dev)
867 {
868         struct netfront_info *np = netdev_priv(dev);
869         unsigned int num_queues = dev->real_num_tx_queues;
870         unsigned int i;
871         struct netfront_queue *queue;
872         netif_tx_stop_all_queues(np->netdev);
873         for (i = 0; i < num_queues; ++i) {
874                 queue = &np->queues[i];
875                 napi_disable(&queue->napi);
876         }
877         return 0;
878 }
879
880 static void xennet_destroy_queues(struct netfront_info *info)
881 {
882         unsigned int i;
883
884         for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
885                 struct netfront_queue *queue = &info->queues[i];
886
887                 if (netif_running(info->netdev))
888                         napi_disable(&queue->napi);
889                 netif_napi_del(&queue->napi);
890         }
891
892         kfree(info->queues);
893         info->queues = NULL;
894 }
895
896 static void xennet_uninit(struct net_device *dev)
897 {
898         struct netfront_info *np = netdev_priv(dev);
899         xennet_destroy_queues(np);
900 }
901
902 static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
903 {
904         unsigned long flags;
905
906         spin_lock_irqsave(&queue->rx_cons_lock, flags);
907         queue->rx.rsp_cons = val;
908         queue->rx_rsp_unconsumed = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
909         spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
910 }
911
912 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
913                                 grant_ref_t ref)
914 {
915         int new = xennet_rxidx(queue->rx.req_prod_pvt);
916
917         BUG_ON(queue->rx_skbs[new]);
918         queue->rx_skbs[new] = skb;
919         queue->grant_rx_ref[new] = ref;
920         RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
921         RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
922         queue->rx.req_prod_pvt++;
923 }
924
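/*
 * Consume the chain of extra-info slots that follows an RX response,
 * storing them in @extras and recycling the corresponding ring slots.
 */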
925 static int xennet_get_extras(struct netfront_queue *queue,
926                              struct xen_netif_extra_info *extras,
927                              RING_IDX rp)
928
929 {
930         struct xen_netif_extra_info extra;
931         struct device *dev = &queue->info->netdev->dev;
932         RING_IDX cons = queue->rx.rsp_cons;
933         int err = 0;
934
935         do {
936                 struct sk_buff *skb;
937                 grant_ref_t ref;
938
939                 if (unlikely(cons + 1 == rp)) {
940                         if (net_ratelimit())
941                                 dev_warn(dev, "Missing extra info\n");
942                         err = -EBADR;
943                         break;
944                 }
945
946                 RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
947
948                 if (unlikely(!extra.type ||
949                              extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
950                         if (net_ratelimit())
951                                 dev_warn(dev, "Invalid extra type: %d\n",
952                                          extra.type);
953                         err = -EINVAL;
954                 } else {
955                         extras[extra.type - 1] = extra;
956                 }
957
958                 skb = xennet_get_rx_skb(queue, cons);
959                 ref = xennet_get_rx_ref(queue, cons);
960                 xennet_move_rx_slot(queue, skb, ref);
961         } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
962
963         xennet_set_rx_rsp_cons(queue, cons);
964         return err;
965 }
966
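/*
 * Run the attached XDP program on one received page and act on the
 * verdict: XDP_TX retransmits the page, XDP_REDIRECT hands it to
 * another device, everything else is left to the caller.  Returns the
 * XDP action.
 */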
967 static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
968                    struct xen_netif_rx_response *rx, struct bpf_prog *prog,
969                    struct xdp_buff *xdp, bool *need_xdp_flush)
970 {
971         struct xdp_frame *xdpf;
972         u32 len = rx->status;
973         u32 act;
974         int err;
975
976         xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
977                       &queue->xdp_rxq);
978         xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM,
979                          len, false);
980
981         act = bpf_prog_run_xdp(prog, xdp);
982         switch (act) {
983         case XDP_TX:
984                 get_page(pdata);
985                 xdpf = xdp_convert_buff_to_frame(xdp);
986                 err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
987                 if (unlikely(!err))
988                         xdp_return_frame_rx_napi(xdpf);
989                 else if (unlikely(err < 0))
990                         trace_xdp_exception(queue->info->netdev, prog, act);
991                 break;
992         case XDP_REDIRECT:
993                 get_page(pdata);
994                 err = xdp_do_redirect(queue->info->netdev, xdp, prog);
995                 *need_xdp_flush = true;
996                 if (unlikely(err))
997                         trace_xdp_exception(queue->info->netdev, prog, act);
998                 break;
999         case XDP_PASS:
1000         case XDP_DROP:
1001                 break;
1002
1003         case XDP_ABORTED:
1004                 trace_xdp_exception(queue->info->netdev, prog, act);
1005                 break;
1006
1007         default:
1008                 bpf_warn_invalid_xdp_action(queue->info->netdev, prog, act);
1009         }
1010
1011         return act;
1012 }
1013
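/*
 * Collect all RX responses that belong to one packet (including the
 * extra-info chain and any additional data slots), validate them, run
 * XDP if a program is attached and queue the per-slot skbs on @list.
 */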
1014 static int xennet_get_responses(struct netfront_queue *queue,
1015                                 struct netfront_rx_info *rinfo, RING_IDX rp,
1016                                 struct sk_buff_head *list,
1017                                 bool *need_xdp_flush)
1018 {
1019         struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
1020         int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
1021         RING_IDX cons = queue->rx.rsp_cons;
1022         struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
1023         struct xen_netif_extra_info *extras = rinfo->extras;
1024         grant_ref_t ref = xennet_get_rx_ref(queue, cons);
1025         struct device *dev = &queue->info->netdev->dev;
1026         struct bpf_prog *xdp_prog;
1027         struct xdp_buff xdp;
1028         int slots = 1;
1029         int err = 0;
1030         u32 verdict;
1031
1032         if (rx->flags & XEN_NETRXF_extra_info) {
1033                 err = xennet_get_extras(queue, extras, rp);
1034                 if (!err) {
1035                         if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
1036                                 struct xen_netif_extra_info *xdp;
1037
1038                                 xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
1039                                 rx->offset = xdp->u.xdp.headroom;
1040                         }
1041                 }
1042                 cons = queue->rx.rsp_cons;
1043         }
1044
1045         for (;;) {
1046                 /*
1047                  * This definitely indicates a bug, either in this driver or in
1048                  * the backend driver. In future this should flag the bad
1049                  * situation to the system controller to reboot the backend.
1050                  */
1051                 if (ref == INVALID_GRANT_REF) {
1052                         if (net_ratelimit())
1053                                 dev_warn(dev, "Bad rx response id %d.\n",
1054                                          rx->id);
1055                         err = -EINVAL;
1056                         goto next;
1057                 }
1058
1059                 if (unlikely(rx->status < 0 ||
1060                              rx->offset + rx->status > XEN_PAGE_SIZE)) {
1061                         if (net_ratelimit())
1062                                 dev_warn(dev, "rx->offset: %u, size: %d\n",
1063                                          rx->offset, rx->status);
1064                         xennet_move_rx_slot(queue, skb, ref);
1065                         err = -EINVAL;
1066                         goto next;
1067                 }
1068
1069                 if (!gnttab_end_foreign_access_ref(ref)) {
1070                         dev_alert(dev,
1071                                   "Grant still in use by backend domain\n");
1072                         queue->info->broken = true;
1073                         dev_alert(dev, "Disabled for further use\n");
1074                         return -EINVAL;
1075                 }
1076
1077                 gnttab_release_grant_reference(&queue->gref_rx_head, ref);
1078
1079                 rcu_read_lock();
1080                 xdp_prog = rcu_dereference(queue->xdp_prog);
1081                 if (xdp_prog) {
1082                         if (!(rx->flags & XEN_NETRXF_more_data)) {
1083                                 /* currently only a single page contains data */
1084                                 verdict = xennet_run_xdp(queue,
1085                                                          skb_frag_page(&skb_shinfo(skb)->frags[0]),
1086                                                          rx, xdp_prog, &xdp, need_xdp_flush);
1087                                 if (verdict != XDP_PASS)
1088                                         err = -EINVAL;
1089                         } else {
1090                                 /* drop the frame */
1091                                 err = -EINVAL;
1092                         }
1093                 }
1094                 rcu_read_unlock();
1095
1096                 __skb_queue_tail(list, skb);
1097
1098 next:
1099                 if (!(rx->flags & XEN_NETRXF_more_data))
1100                         break;
1101
1102                 if (cons + slots == rp) {
1103                         if (net_ratelimit())
1104                                 dev_warn(dev, "Need more slots\n");
1105                         err = -ENOENT;
1106                         break;
1107                 }
1108
1109                 RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
1110                 rx = &rx_local;
1111                 skb = xennet_get_rx_skb(queue, cons + slots);
1112                 ref = xennet_get_rx_ref(queue, cons + slots);
1113                 slots++;
1114         }
1115
1116         if (unlikely(slots > max)) {
1117                 if (net_ratelimit())
1118                         dev_warn(dev, "Too many slots\n");
1119                 err = -E2BIG;
1120         }
1121
1122         if (unlikely(err))
1123                 xennet_set_rx_rsp_cons(queue, cons + slots);
1124
1125         return err;
1126 }
1127
1128 static int xennet_set_skb_gso(struct sk_buff *skb,
1129                               struct xen_netif_extra_info *gso)
1130 {
1131         if (!gso->u.gso.size) {
1132                 if (net_ratelimit())
1133                         pr_warn("GSO size must not be zero\n");
1134                 return -EINVAL;
1135         }
1136
1137         if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
1138             gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
1139                 if (net_ratelimit())
1140                         pr_warn("Bad GSO type %d\n", gso->u.gso.type);
1141                 return -EINVAL;
1142         }
1143
1144         skb_shinfo(skb)->gso_size = gso->u.gso.size;
1145         skb_shinfo(skb)->gso_type =
1146                 (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
1147                 SKB_GSO_TCPV4 :
1148                 SKB_GSO_TCPV6;
1149
1150         /* Header must be checked, and gso_segs computed. */
1151         skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1152         skb_shinfo(skb)->gso_segs = 0;
1153
1154         return 0;
1155 }
1156
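/*
 * Attach the pages of the per-slot skbs queued on @list as frags of
 * @skb, consuming the matching RX responses.
 */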
1157 static int xennet_fill_frags(struct netfront_queue *queue,
1158                              struct sk_buff *skb,
1159                              struct sk_buff_head *list)
1160 {
1161         RING_IDX cons = queue->rx.rsp_cons;
1162         struct sk_buff *nskb;
1163
1164         while ((nskb = __skb_dequeue(list))) {
1165                 struct xen_netif_rx_response rx;
1166                 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
1167
1168                 RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
1169
1170                 if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
1171                         unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1172
1173                         BUG_ON(pull_to < skb_headlen(skb));
1174                         __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1175                 }
1176                 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1177                         xennet_set_rx_rsp_cons(queue,
1178                                                ++cons + skb_queue_len(list));
1179                         kfree_skb(nskb);
1180                         return -ENOENT;
1181                 }
1182
1183                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1184                                 skb_frag_page(nfrag),
1185                                 rx.offset, rx.status, PAGE_SIZE);
1186
1187                 skb_shinfo(nskb)->nr_frags = 0;
1188                 kfree_skb(nskb);
1189         }
1190
1191         xennet_set_rx_rsp_cons(queue, cons);
1192
1193         return 0;
1194 }
1195
1196 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
1197 {
1198         bool recalculate_partial_csum = false;
1199
1200         /*
1201          * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1202          * peers can fail to set NETRXF_csum_blank when sending a GSO
1203          * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1204          * recalculate the partial checksum.
1205          */
1206         if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1207                 struct netfront_info *np = netdev_priv(dev);
1208                 atomic_inc(&np->rx_gso_checksum_fixup);
1209                 skb->ip_summed = CHECKSUM_PARTIAL;
1210                 recalculate_partial_csum = true;
1211         }
1212
1213         /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1214         if (skb->ip_summed != CHECKSUM_PARTIAL)
1215                 return 0;
1216
1217         return skb_checksum_setup(skb, recalculate_partial_csum);
1218 }
1219
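/*
 * Hand a queue of reassembled skbs to the network stack, fixing up
 * checksum state first.  Returns the number of packets dropped.
 */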
1220 static int handle_incoming_queue(struct netfront_queue *queue,
1221                                  struct sk_buff_head *rxq)
1222 {
1223         struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
1224         int packets_dropped = 0;
1225         struct sk_buff *skb;
1226
1227         while ((skb = __skb_dequeue(rxq)) != NULL) {
1228                 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1229
1230                 if (pull_to > skb_headlen(skb))
1231                         __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1232
1233                 /* Ethernet work: Delayed to here as it peeks the header. */
1234                 skb->protocol = eth_type_trans(skb, queue->info->netdev);
1235                 skb_reset_network_header(skb);
1236
1237                 if (checksum_setup(queue->info->netdev, skb)) {
1238                         kfree_skb(skb);
1239                         packets_dropped++;
1240                         queue->info->netdev->stats.rx_errors++;
1241                         continue;
1242                 }
1243
1244                 u64_stats_update_begin(&rx_stats->syncp);
1245                 rx_stats->packets++;
1246                 rx_stats->bytes += skb->len;
1247                 u64_stats_update_end(&rx_stats->syncp);
1248
1249                 /* Pass it up. */
1250                 napi_gro_receive(&queue->napi, skb);
1251         }
1252
1253         return packets_dropped;
1254 }
1255
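/*
 * NAPI poll handler: consume up to @budget RX responses, reassemble
 * them into skbs, pass them up the stack and refill the RX ring.
 */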
1256 static int xennet_poll(struct napi_struct *napi, int budget)
1257 {
1258         struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
1259         struct net_device *dev = queue->info->netdev;
1260         struct sk_buff *skb;
1261         struct netfront_rx_info rinfo;
1262         struct xen_netif_rx_response *rx = &rinfo.rx;
1263         struct xen_netif_extra_info *extras = rinfo.extras;
1264         RING_IDX i, rp;
1265         int work_done;
1266         struct sk_buff_head rxq;
1267         struct sk_buff_head errq;
1268         struct sk_buff_head tmpq;
1269         int err;
1270         bool need_xdp_flush = false;
1271
1272         spin_lock(&queue->rx_lock);
1273
1274         skb_queue_head_init(&rxq);
1275         skb_queue_head_init(&errq);
1276         skb_queue_head_init(&tmpq);
1277
1278         rp = queue->rx.sring->rsp_prod;
1279         if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1280                 dev_alert(&dev->dev, "Illegal number of responses %u\n",
1281                           rp - queue->rx.rsp_cons);
1282                 queue->info->broken = true;
1283                 spin_unlock(&queue->rx_lock);
1284                 return 0;
1285         }
1286         rmb(); /* Ensure we see queued responses up to 'rp'. */
1287
1288         i = queue->rx.rsp_cons;
1289         work_done = 0;
1290         while ((i != rp) && (work_done < budget)) {
1291                 RING_COPY_RESPONSE(&queue->rx, i, rx);
1292                 memset(extras, 0, sizeof(rinfo.extras));
1293
1294                 err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
1295                                            &need_xdp_flush);
1296
1297                 if (unlikely(err)) {
1298                         if (queue->info->broken) {
1299                                 spin_unlock(&queue->rx_lock);
1300                                 return 0;
1301                         }
1302 err:
1303                         while ((skb = __skb_dequeue(&tmpq)))
1304                                 __skb_queue_tail(&errq, skb);
1305                         dev->stats.rx_errors++;
1306                         i = queue->rx.rsp_cons;
1307                         continue;
1308                 }
1309
1310                 skb = __skb_dequeue(&tmpq);
1311
1312                 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1313                         struct xen_netif_extra_info *gso;
1314                         gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1315
1316                         if (unlikely(xennet_set_skb_gso(skb, gso))) {
1317                                 __skb_queue_head(&tmpq, skb);
1318                                 xennet_set_rx_rsp_cons(queue,
1319                                                        queue->rx.rsp_cons +
1320                                                        skb_queue_len(&tmpq));
1321                                 goto err;
1322                         }
1323                 }
1324
1325                 NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1326                 if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1327                         NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1328
1329                 skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
1330                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1331                 skb->data_len = rx->status;
1332                 skb->len += rx->status;
1333
1334                 if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
1335                         goto err;
1336
1337                 if (rx->flags & XEN_NETRXF_csum_blank)
1338                         skb->ip_summed = CHECKSUM_PARTIAL;
1339                 else if (rx->flags & XEN_NETRXF_data_validated)
1340                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1341
1342                 __skb_queue_tail(&rxq, skb);
1343
1344                 i = queue->rx.rsp_cons + 1;
1345                 xennet_set_rx_rsp_cons(queue, i);
1346                 work_done++;
1347         }
1348         if (need_xdp_flush)
1349                 xdp_do_flush();
1350
1351         __skb_queue_purge(&errq);
1352
1353         work_done -= handle_incoming_queue(queue, &rxq);
1354
1355         xennet_alloc_rx_buffers(queue);
1356
1357         if (work_done < budget) {
1358                 int more_to_do = 0;
1359
1360                 napi_complete_done(napi, work_done);
1361
1362                 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1363                 if (more_to_do)
1364                         napi_schedule(napi);
1365         }
1366
1367         spin_unlock(&queue->rx_lock);
1368
1369         return work_done;
1370 }
1371
1372 static int xennet_change_mtu(struct net_device *dev, int mtu)
1373 {
1374         int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1375
1376         if (mtu > max)
1377                 return -EINVAL;
1378         dev->mtu = mtu;
1379         return 0;
1380 }
1381
1382 static void xennet_get_stats64(struct net_device *dev,
1383                                struct rtnl_link_stats64 *tot)
1384 {
1385         struct netfront_info *np = netdev_priv(dev);
1386         int cpu;
1387
1388         for_each_possible_cpu(cpu) {
1389                 struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1390                 struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1391                 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1392                 unsigned int start;
1393
1394                 do {
1395                         start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
1396                         tx_packets = tx_stats->packets;
1397                         tx_bytes = tx_stats->bytes;
1398                 } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
1399
1400                 do {
1401                         start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
1402                         rx_packets = rx_stats->packets;
1403                         rx_bytes = rx_stats->bytes;
1404                 } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
1405
1406                 tot->rx_packets += rx_packets;
1407                 tot->tx_packets += tx_packets;
1408                 tot->rx_bytes   += rx_bytes;
1409                 tot->tx_bytes   += tx_bytes;
1410         }
1411
1412         tot->rx_errors  = dev->stats.rx_errors;
1413         tot->tx_dropped = dev->stats.tx_dropped;
1414 }
1415
1416 static void xennet_release_tx_bufs(struct netfront_queue *queue)
1417 {
1418         struct sk_buff *skb;
1419         int i;
1420
1421         for (i = 0; i < NET_TX_RING_SIZE; i++) {
1422                 /* Skip over entries which are actually freelist references */
1423                 if (!queue->tx_skbs[i])
1424                         continue;
1425
1426                 skb = queue->tx_skbs[i];
1427                 queue->tx_skbs[i] = NULL;
1428                 get_page(queue->grant_tx_page[i]);
1429                 gnttab_end_foreign_access(queue->grant_tx_ref[i],
1430                                           queue->grant_tx_page[i]);
1431                 queue->grant_tx_page[i] = NULL;
1432                 queue->grant_tx_ref[i] = INVALID_GRANT_REF;
1433                 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
1434                 dev_kfree_skb_irq(skb);
1435         }
1436 }
1437
1438 static void xennet_release_rx_bufs(struct netfront_queue *queue)
1439 {
1440         int id, ref;
1441
1442         spin_lock_bh(&queue->rx_lock);
1443
1444         for (id = 0; id < NET_RX_RING_SIZE; id++) {
1445                 struct sk_buff *skb;
1446                 struct page *page;
1447
1448                 skb = queue->rx_skbs[id];
1449                 if (!skb)
1450                         continue;
1451
1452                 ref = queue->grant_rx_ref[id];
1453                 if (ref == INVALID_GRANT_REF)
1454                         continue;
1455
1456                 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1457
1458                 /* gnttab_end_foreign_access() needs a page ref until
1459                  * foreign access is ended (which may be deferred).
1460                  */
1461                 get_page(page);
1462                 gnttab_end_foreign_access(ref, page);
1463                 queue->grant_rx_ref[id] = INVALID_GRANT_REF;
1464
1465                 kfree_skb(skb);
1466         }
1467
1468         spin_unlock_bh(&queue->rx_lock);
1469 }
1470
1471 static netdev_features_t xennet_fix_features(struct net_device *dev,
1472         netdev_features_t features)
1473 {
1474         struct netfront_info *np = netdev_priv(dev);
1475
1476         if (features & NETIF_F_SG &&
1477             !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1478                 features &= ~NETIF_F_SG;
1479
1480         if (features & NETIF_F_IPV6_CSUM &&
1481             !xenbus_read_unsigned(np->xbdev->otherend,
1482                                   "feature-ipv6-csum-offload", 0))
1483                 features &= ~NETIF_F_IPV6_CSUM;
1484
1485         if (features & NETIF_F_TSO &&
1486             !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1487                 features &= ~NETIF_F_TSO;
1488
1489         if (features & NETIF_F_TSO6 &&
1490             !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1491                 features &= ~NETIF_F_TSO6;
1492
1493         return features;
1494 }
1495
1496 static int xennet_set_features(struct net_device *dev,
1497         netdev_features_t features)
1498 {
1499         if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1500                 netdev_info(dev, "Reducing MTU because no SG offload\n");
1501                 dev->mtu = ETH_DATA_LEN;
1502         }
1503
1504         return 0;
1505 }
1506
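/* Tx event handling: reap completed Tx slots under tx_lock and clear the
 * spurious-EOI hint when work was done.  Returns false if the device has
 * been marked broken, in which case the caller skips the lateeoi.
 */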
1507 static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
1508 {
1509         unsigned long flags;
1510
1511         if (unlikely(queue->info->broken))
1512                 return false;
1513
1514         spin_lock_irqsave(&queue->tx_lock, flags);
1515         if (xennet_tx_buf_gc(queue))
1516                 *eoi = 0;
1517         spin_unlock_irqrestore(&queue->tx_lock, flags);
1518
1519         return true;
1520 }
1521
1522 static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1523 {
1524         unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1525
1526         if (likely(xennet_handle_tx(dev_id, &eoiflag)))
1527                 xen_irq_lateeoi(irq, eoiflag);
1528
1529         return IRQ_HANDLED;
1530 }
1531
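/* Rx event handling: track the number of unconsumed responses, mark the
 * device broken if the backend moves the producer index backwards, and
 * schedule NAPI when new responses are queued.
 */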
1532 static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
1533 {
1534         unsigned int work_queued;
1535         unsigned long flags;
1536
1537         if (unlikely(queue->info->broken))
1538                 return false;
1539
1540         spin_lock_irqsave(&queue->rx_cons_lock, flags);
1541         work_queued = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
1542         if (work_queued > queue->rx_rsp_unconsumed) {
1543                 queue->rx_rsp_unconsumed = work_queued;
1544                 *eoi = 0;
1545         } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
1546                 const struct device *dev = &queue->info->netdev->dev;
1547
1548                 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1549                 dev_alert(dev, "RX producer index going backwards\n");
1550                 dev_alert(dev, "Disabled for further use\n");
1551                 queue->info->broken = true;
1552                 return false;
1553         }
1554         spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1555
1556         if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
1557                 napi_schedule(&queue->napi);
1558
1559         return true;
1560 }
1561
1562 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1563 {
1564         unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1565
1566         if (likely(xennet_handle_rx(dev_id, &eoiflag)))
1567                 xen_irq_lateeoi(irq, eoiflag);
1568
1569         return IRQ_HANDLED;
1570 }
1571
1572 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1573 {
1574         unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1575
1576         if (xennet_handle_tx(dev_id, &eoiflag) &&
1577             xennet_handle_rx(dev_id, &eoiflag))
1578                 xen_irq_lateeoi(irq, eoiflag);
1579
1580         return IRQ_HANDLED;
1581 }
1582
1583 #ifdef CONFIG_NET_POLL_CONTROLLER
1584 static void xennet_poll_controller(struct net_device *dev)
1585 {
1586         /* Poll each queue */
1587         struct netfront_info *info = netdev_priv(dev);
1588         unsigned int num_queues = dev->real_num_tx_queues;
1589         unsigned int i;
1590
1591         if (info->broken)
1592                 return;
1593
1594         for (i = 0; i < num_queues; ++i)
1595                 xennet_interrupt(0, &info->queues[i]);
1596 }
1597 #endif
1598
1599 #define NETBACK_XDP_HEADROOM_DISABLE    0
1600 #define NETBACK_XDP_HEADROOM_ENABLE     1
1601
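/* Tell the backend how much packet headroom is required by writing the
 * "xdp-headroom" node (XDP_PACKET_HEADROOM with an XDP program attached,
 * 0 otherwise).
 */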
1602 static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
1603 {
1604         int err;
1605         unsigned short headroom;
1606
1607         headroom = xdp ? XDP_PACKET_HEADROOM : 0;
1608         err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
1609                             "xdp-headroom", "%hu",
1610                             headroom);
1611         if (err)
1612                 pr_warn("Error writing xdp-headroom\n");
1613
1614         return err;
1615 }
1616
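/* Install or remove an XDP program: renegotiate the headroom with the backend
 * via the Reconfiguring/Reconfigured handshake, then swap the per-queue
 * program pointers.
 */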
1617 static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1618                           struct netlink_ext_ack *extack)
1619 {
1620         unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
1621         struct netfront_info *np = netdev_priv(dev);
1622         struct bpf_prog *old_prog;
1623         unsigned int i, err;
1624
1625         if (dev->mtu > max_mtu) {
1626                 netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu);
1627                 return -EINVAL;
1628         }
1629
1630         if (!np->netback_has_xdp_headroom)
1631                 return 0;
1632
1633         xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);
1634
1635         err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE :
1636                                   NETBACK_XDP_HEADROOM_DISABLE);
1637         if (err)
1638                 return err;
1639
1640         /* avoid the race with XDP headroom adjustment */
1641         wait_event(module_wq,
1642                    xenbus_read_driver_state(np->xbdev->otherend) ==
1643                    XenbusStateReconfigured);
1644         np->netfront_xdp_enabled = true;
1645
1646         old_prog = rtnl_dereference(np->queues[0].xdp_prog);
1647
1648         if (prog)
1649                 bpf_prog_add(prog, dev->real_num_tx_queues);
1650
1651         for (i = 0; i < dev->real_num_tx_queues; ++i)
1652                 rcu_assign_pointer(np->queues[i].xdp_prog, prog);
1653
1654         if (old_prog)
1655                 for (i = 0; i < dev->real_num_tx_queues; ++i)
1656                         bpf_prog_put(old_prog);
1657
1658         xenbus_switch_state(np->xbdev, XenbusStateConnected);
1659
1660         return 0;
1661 }
1662
1663 static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1664 {
1665         struct netfront_info *np = netdev_priv(dev);
1666
1667         if (np->broken)
1668                 return -ENODEV;
1669
1670         switch (xdp->command) {
1671         case XDP_SETUP_PROG:
1672                 return xennet_xdp_set(dev, xdp->prog, xdp->extack);
1673         default:
1674                 return -EINVAL;
1675         }
1676 }
1677
1678 static const struct net_device_ops xennet_netdev_ops = {
1679         .ndo_uninit          = xennet_uninit,
1680         .ndo_open            = xennet_open,
1681         .ndo_stop            = xennet_close,
1682         .ndo_start_xmit      = xennet_start_xmit,
1683         .ndo_change_mtu      = xennet_change_mtu,
1684         .ndo_get_stats64     = xennet_get_stats64,
1685         .ndo_set_mac_address = eth_mac_addr,
1686         .ndo_validate_addr   = eth_validate_addr,
1687         .ndo_fix_features    = xennet_fix_features,
1688         .ndo_set_features    = xennet_set_features,
1689         .ndo_select_queue    = xennet_select_queue,
1690         .ndo_bpf             = xennet_xdp,
1691         .ndo_xdp_xmit        = xennet_xdp_xmit,
1692 #ifdef CONFIG_NET_POLL_CONTROLLER
1693         .ndo_poll_controller = xennet_poll_controller,
1694 #endif
1695 };
1696
1697 static void xennet_free_netdev(struct net_device *netdev)
1698 {
1699         struct netfront_info *np = netdev_priv(netdev);
1700
1701         free_percpu(np->rx_stats);
1702         free_percpu(np->tx_stats);
1703         free_netdev(netdev);
1704 }
1705
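/* Allocate the net_device and per-CPU stats, set up the default feature set,
 * and wait for the backend to leave the Closed/Unknown states.
 */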
1706 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1707 {
1708         int err;
1709         struct net_device *netdev;
1710         struct netfront_info *np;
1711
1712         netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1713         if (!netdev)
1714                 return ERR_PTR(-ENOMEM);
1715
1716         np                   = netdev_priv(netdev);
1717         np->xbdev            = dev;
1718
1719         np->queues = NULL;
1720
1721         err = -ENOMEM;
1722         np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1723         if (np->rx_stats == NULL)
1724                 goto exit;
1725         np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1726         if (np->tx_stats == NULL)
1727                 goto exit;
1728
1729         netdev->netdev_ops      = &xennet_netdev_ops;
1730
1731         netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1732                                   NETIF_F_GSO_ROBUST;
1733         netdev->hw_features     = NETIF_F_SG |
1734                                   NETIF_F_IPV6_CSUM |
1735                                   NETIF_F_TSO | NETIF_F_TSO6;
1736
1737         /*
1738          * Assume that all hw features are available for now. This set
1739          * will be adjusted by the call to netdev_update_features() in
1740          * xennet_connect() which is the earliest point where we can
1741          * negotiate with the backend regarding supported features.
1742          */
1743         netdev->features |= netdev->hw_features;
1744
1745         netdev->ethtool_ops = &xennet_ethtool_ops;
1746         netdev->min_mtu = ETH_MIN_MTU;
1747         netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1748         SET_NETDEV_DEV(netdev, &dev->dev);
1749
1750         np->netdev = netdev;
1751         np->netfront_xdp_enabled = false;
1752
1753         netif_carrier_off(netdev);
1754
1755         do {
1756                 xenbus_switch_state(dev, XenbusStateInitialising);
1757                 err = wait_event_timeout(module_wq,
1758                                  xenbus_read_driver_state(dev->otherend) !=
1759                                  XenbusStateClosed &&
1760                                  xenbus_read_driver_state(dev->otherend) !=
1761                                  XenbusStateUnknown, XENNET_TIMEOUT);
1762         } while (!err);
1763
1764         return netdev;
1765
1766  exit:
1767         xennet_free_netdev(netdev);
1768         return ERR_PTR(err);
1769 }
1770
1771 /*
1772  * Entry point to this code when a new device is created.  Allocate the basic
1773  * structures and the ring buffers for communication with the backend, and
1774  * inform the backend of the appropriate details for those.
1775  */
1776 static int netfront_probe(struct xenbus_device *dev,
1777                           const struct xenbus_device_id *id)
1778 {
1779         int err;
1780         struct net_device *netdev;
1781         struct netfront_info *info;
1782
1783         netdev = xennet_create_dev(dev);
1784         if (IS_ERR(netdev)) {
1785                 err = PTR_ERR(netdev);
1786                 xenbus_dev_fatal(dev, err, "creating netdev");
1787                 return err;
1788         }
1789
1790         info = netdev_priv(netdev);
1791         dev_set_drvdata(&dev->dev, info);
1792 #ifdef CONFIG_SYSFS
1793         info->netdev->sysfs_groups[0] = &xennet_dev_group;
1794 #endif
1795
1796         return 0;
1797 }
1798
1799 static void xennet_end_access(int ref, void *page)
1800 {
1801         /* This frees the page as a side-effect */
1802         if (ref != INVALID_GRANT_REF)
1803                 gnttab_end_foreign_access(ref, virt_to_page(page));
1804 }
1805
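/* Tear down everything shared with the backend: event channels and IRQs,
 * outstanding Tx/Rx buffers, grant references, the shared rings and the
 * per-queue page pools.
 */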
1806 static void xennet_disconnect_backend(struct netfront_info *info)
1807 {
1808         unsigned int i = 0;
1809         unsigned int num_queues = info->netdev->real_num_tx_queues;
1810
1811         netif_carrier_off(info->netdev);
1812
1813         for (i = 0; i < num_queues && info->queues; ++i) {
1814                 struct netfront_queue *queue = &info->queues[i];
1815
1816                 del_timer_sync(&queue->rx_refill_timer);
1817
1818                 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1819                         unbind_from_irqhandler(queue->tx_irq, queue);
1820                 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1821                         unbind_from_irqhandler(queue->tx_irq, queue);
1822                         unbind_from_irqhandler(queue->rx_irq, queue);
1823                 }
1824                 queue->tx_evtchn = queue->rx_evtchn = 0;
1825                 queue->tx_irq = queue->rx_irq = 0;
1826
1827                 if (netif_running(info->netdev))
1828                         napi_synchronize(&queue->napi);
1829
1830                 xennet_release_tx_bufs(queue);
1831                 xennet_release_rx_bufs(queue);
1832                 gnttab_free_grant_references(queue->gref_tx_head);
1833                 gnttab_free_grant_references(queue->gref_rx_head);
1834
1835                 /* End access and free the pages */
1836                 xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1837                 xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1838
1839                 queue->tx_ring_ref = INVALID_GRANT_REF;
1840                 queue->rx_ring_ref = INVALID_GRANT_REF;
1841                 queue->tx.sring = NULL;
1842                 queue->rx.sring = NULL;
1843
1844                 page_pool_destroy(queue->page_pool);
1845         }
1846 }
1847
1848 /*
1849  * We are reconnecting to the backend, due to a suspend/resume, or a backend
1850  * driver restart.  We tear down our netif structure and recreate it, but
1851  * leave the device-layer structures intact so that this is transparent to the
1852  * rest of the kernel.
1853  */
1854 static int netfront_resume(struct xenbus_device *dev)
1855 {
1856         struct netfront_info *info = dev_get_drvdata(&dev->dev);
1857
1858         dev_dbg(&dev->dev, "%s\n", dev->nodename);
1859
1860         netif_tx_lock_bh(info->netdev);
1861         netif_device_detach(info->netdev);
1862         netif_tx_unlock_bh(info->netdev);
1863
1864         xennet_disconnect_backend(info);
1865         return 0;
1866 }
1867
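/* Parse the colon-separated MAC address from the frontend's "mac" node. */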
1868 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1869 {
1870         char *s, *e, *macstr;
1871         int i;
1872
1873         macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1874         if (IS_ERR(macstr))
1875                 return PTR_ERR(macstr);
1876
1877         for (i = 0; i < ETH_ALEN; i++) {
1878                 mac[i] = simple_strtoul(s, &e, 16);
1879                 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1880                         kfree(macstr);
1881                         return -ENOENT;
1882                 }
1883                 s = e+1;
1884         }
1885
1886         kfree(macstr);
1887         return 0;
1888 }
1889
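/* Bind a single event channel that services both Tx and Rx. */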
1890 static int setup_netfront_single(struct netfront_queue *queue)
1891 {
1892         int err;
1893
1894         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1895         if (err < 0)
1896                 goto fail;
1897
1898         err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1899                                                 xennet_interrupt, 0,
1900                                                 queue->info->netdev->name,
1901                                                 queue);
1902         if (err < 0)
1903                 goto bind_fail;
1904         queue->rx_evtchn = queue->tx_evtchn;
1905         queue->rx_irq = queue->tx_irq = err;
1906
1907         return 0;
1908
1909 bind_fail:
1910         xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1911         queue->tx_evtchn = 0;
1912 fail:
1913         return err;
1914 }
1915
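/* Bind separate Tx and Rx event channels (feature-split-event-channels). */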
1916 static int setup_netfront_split(struct netfront_queue *queue)
1917 {
1918         int err;
1919
1920         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1921         if (err < 0)
1922                 goto fail;
1923         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1924         if (err < 0)
1925                 goto alloc_rx_evtchn_fail;
1926
1927         snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1928                  "%s-tx", queue->name);
1929         err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1930                                                 xennet_tx_interrupt, 0,
1931                                                 queue->tx_irq_name, queue);
1932         if (err < 0)
1933                 goto bind_tx_fail;
1934         queue->tx_irq = err;
1935
1936         snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1937                  "%s-rx", queue->name);
1938         err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
1939                                                 xennet_rx_interrupt, 0,
1940                                                 queue->rx_irq_name, queue);
1941         if (err < 0)
1942                 goto bind_rx_fail;
1943         queue->rx_irq = err;
1944
1945         return 0;
1946
1947 bind_rx_fail:
1948         unbind_from_irqhandler(queue->tx_irq, queue);
1949         queue->tx_irq = 0;
1950 bind_tx_fail:
1951         xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1952         queue->rx_evtchn = 0;
1953 alloc_rx_evtchn_fail:
1954         xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1955         queue->tx_evtchn = 0;
1956 fail:
1957         return err;
1958 }
1959
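/* Allocate and grant the shared Tx/Rx rings for one queue and bind its event
 * channel(s), preferring split channels when the backend supports them.
 */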
1960 static int setup_netfront(struct xenbus_device *dev,
1961                         struct netfront_queue *queue, unsigned int feature_split_evtchn)
1962 {
1963         struct xen_netif_tx_sring *txs;
1964         struct xen_netif_rx_sring *rxs;
1965         int err;
1966
1967         queue->tx_ring_ref = INVALID_GRANT_REF;
1968         queue->rx_ring_ref = INVALID_GRANT_REF;
1969         queue->rx.sring = NULL;
1970         queue->tx.sring = NULL;
1971
1972         err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&txs,
1973                                 1, &queue->tx_ring_ref);
1974         if (err)
1975                 goto fail;
1976
1977         XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1978
1979         err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&rxs,
1980                                 1, &queue->rx_ring_ref);
1981         if (err)
1982                 goto fail;
1983
1984         XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1985
1986         if (feature_split_evtchn)
1987                 err = setup_netfront_split(queue);
1988         /* Set up a single event channel if
1989          *  a) feature-split-event-channels == 0
1990          *  b) feature-split-event-channels == 1 but the split setup failed
1991          */
1992         if (!feature_split_evtchn || err)
1993                 err = setup_netfront_single(queue);
1994
1995         if (err)
1996                 goto fail;
1997
1998         return 0;
1999
2000  fail:
2001         xenbus_teardown_ring((void **)&queue->rx.sring, 1, &queue->rx_ring_ref);
2002         xenbus_teardown_ring((void **)&queue->tx.sring, 1, &queue->tx_ring_ref);
2003
2004         return err;
2005 }
2006
2007 /* Queue-specific initialisation
2008  * This used to be done in xennet_create_dev() but must now
2009  * be run per-queue.
2010  */
2011 static int xennet_init_queue(struct netfront_queue *queue)
2012 {
2013         unsigned short i;
2014         int err = 0;
2015         char *devid;
2016
2017         spin_lock_init(&queue->tx_lock);
2018         spin_lock_init(&queue->rx_lock);
2019         spin_lock_init(&queue->rx_cons_lock);
2020
2021         timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
2022
2023         devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
2024         snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
2025                  devid, queue->id);
2026
2027         /* Initialise tx_skb_freelist as a free chain containing every entry. */
2028         queue->tx_skb_freelist = 0;
2029         queue->tx_pend_queue = TX_LINK_NONE;
2030         for (i = 0; i < NET_TX_RING_SIZE; i++) {
2031                 queue->tx_link[i] = i + 1;
2032                 queue->grant_tx_ref[i] = INVALID_GRANT_REF;
2033                 queue->grant_tx_page[i] = NULL;
2034         }
2035         queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
2036
2037         /* Clear out rx_skbs */
2038         for (i = 0; i < NET_RX_RING_SIZE; i++) {
2039                 queue->rx_skbs[i] = NULL;
2040                 queue->grant_rx_ref[i] = INVALID_GRANT_REF;
2041         }
2042
2043         /* A grant for every tx ring slot */
2044         if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
2045                                           &queue->gref_tx_head) < 0) {
2046                 pr_alert("can't alloc tx grant refs\n");
2047                 err = -ENOMEM;
2048                 goto exit;
2049         }
2050
2051         /* A grant for every rx ring slot */
2052         if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
2053                                           &queue->gref_rx_head) < 0) {
2054                 pr_alert("can't alloc rx grant refs\n");
2055                 err = -ENOMEM;
2056                 goto exit_free_tx;
2057         }
2058
2059         return 0;
2060
2061  exit_free_tx:
2062         gnttab_free_grant_references(queue->gref_tx_head);
2063  exit:
2064         return err;
2065 }
2066
2067 static int write_queue_xenstore_keys(struct netfront_queue *queue,
2068                            struct xenbus_transaction *xbt, int write_hierarchical)
2069 {
2070         /* Write the queue-specific keys into XenStore in the traditional
2071          * way for a single queue, or in queue-specific subkeys when there
2072          * are multiple queues.
2073          */
2074         struct xenbus_device *dev = queue->info->xbdev;
2075         int err;
2076         const char *message;
2077         char *path;
2078         size_t pathsize;
2079
2080         /* Choose the correct place to write the keys */
2081         if (write_hierarchical) {
2082                 pathsize = strlen(dev->nodename) + 10;
2083                 path = kzalloc(pathsize, GFP_KERNEL);
2084                 if (!path) {
2085                         err = -ENOMEM;
2086                         message = "out of memory while writing ring references";
2087                         goto error;
2088                 }
2089                 snprintf(path, pathsize, "%s/queue-%u",
2090                                 dev->nodename, queue->id);
2091         } else {
2092                 path = (char *)dev->nodename;
2093         }
2094
2095         /* Write ring references */
2096         err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
2097                         queue->tx_ring_ref);
2098         if (err) {
2099                 message = "writing tx-ring-ref";
2100                 goto error;
2101         }
2102
2103         err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
2104                         queue->rx_ring_ref);
2105         if (err) {
2106                 message = "writing rx-ring-ref";
2107                 goto error;
2108         }
2109
2110         /* Write event channels; taking into account both shared
2111          * and split event channel scenarios.
2112          */
2113         if (queue->tx_evtchn == queue->rx_evtchn) {
2114                 /* Shared event channel */
2115                 err = xenbus_printf(*xbt, path,
2116                                 "event-channel", "%u", queue->tx_evtchn);
2117                 if (err) {
2118                         message = "writing event-channel";
2119                         goto error;
2120                 }
2121         } else {
2122                 /* Split event channels */
2123                 err = xenbus_printf(*xbt, path,
2124                                 "event-channel-tx", "%u", queue->tx_evtchn);
2125                 if (err) {
2126                         message = "writing event-channel-tx";
2127                         goto error;
2128                 }
2129
2130                 err = xenbus_printf(*xbt, path,
2131                                 "event-channel-rx", "%u", queue->rx_evtchn);
2132                 if (err) {
2133                         message = "writing event-channel-rx";
2134                         goto error;
2135                 }
2136         }
2137
2138         if (write_hierarchical)
2139                 kfree(path);
2140         return 0;
2141
2142 error:
2143         if (write_hierarchical)
2144                 kfree(path);
2145         xenbus_dev_fatal(dev, err, "%s", message);
2146         return err;
2147 }
2148
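/* Create the per-queue page pool used for Rx buffers and register the queue's
 * xdp_rxq_info with the page-pool memory model.
 */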
2151 static int xennet_create_page_pool(struct netfront_queue *queue)
2152 {
2153         int err;
2154         struct page_pool_params pp_params = {
2155                 .order = 0,
2156                 .flags = 0,
2157                 .pool_size = NET_RX_RING_SIZE,
2158                 .nid = NUMA_NO_NODE,
2159                 .dev = &queue->info->netdev->dev,
2160                 .offset = XDP_PACKET_HEADROOM,
2161                 .max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
2162         };
2163
2164         queue->page_pool = page_pool_create(&pp_params);
2165         if (IS_ERR(queue->page_pool)) {
2166                 err = PTR_ERR(queue->page_pool);
2167                 queue->page_pool = NULL;
2168                 return err;
2169         }
2170
2171         err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
2172                                queue->id, 0);
2173         if (err) {
2174                 netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
2175                 goto err_free_pp;
2176         }
2177
2178         err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
2179                                          MEM_TYPE_PAGE_POOL, queue->page_pool);
2180         if (err) {
2181                 netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
2182                 goto err_unregister_rxq;
2183         }
2184         return 0;
2185
2186 err_unregister_rxq:
2187         xdp_rxq_info_unreg(&queue->xdp_rxq);
2188 err_free_pp:
2189         page_pool_destroy(queue->page_pool);
2190         queue->page_pool = NULL;
2191         return err;
2192 }
2193
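/* Allocate and initialise the requested number of queues, trimming the count
 * if a queue fails to initialise; a page pool allocation failure is fatal.
 */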
2194 static int xennet_create_queues(struct netfront_info *info,
2195                                 unsigned int *num_queues)
2196 {
2197         unsigned int i;
2198         int ret;
2199
2200         info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
2201                                GFP_KERNEL);
2202         if (!info->queues)
2203                 return -ENOMEM;
2204
2205         for (i = 0; i < *num_queues; i++) {
2206                 struct netfront_queue *queue = &info->queues[i];
2207
2208                 queue->id = i;
2209                 queue->info = info;
2210
2211                 ret = xennet_init_queue(queue);
2212                 if (ret < 0) {
2213                         dev_warn(&info->xbdev->dev,
2214                                  "only created %u queues\n", i);
2215                         *num_queues = i;
2216                         break;
2217                 }
2218
2219                 /* use page pool recycling instead of buddy allocator */
2220                 ret = xennet_create_page_pool(queue);
2221                 if (ret < 0) {
2222                         dev_err(&info->xbdev->dev, "can't allocate page pool\n");
2223                         *num_queues = i;
2224                         return ret;
2225                 }
2226
2227                 netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll);
2228                 if (netif_running(info->netdev))
2229                         napi_enable(&queue->napi);
2230         }
2231
2232         netif_set_real_num_tx_queues(info->netdev, *num_queues);
2233
2234         if (*num_queues == 0) {
2235                 dev_err(&info->xbdev->dev, "no queues\n");
2236                 return -EINVAL;
2237         }
2238         return 0;
2239 }
2240
2241 /* Common code used when first setting up, and when resuming. */
2242 static int talk_to_netback(struct xenbus_device *dev,
2243                            struct netfront_info *info)
2244 {
2245         const char *message;
2246         struct xenbus_transaction xbt;
2247         int err;
2248         unsigned int feature_split_evtchn;
2249         unsigned int i = 0;
2250         unsigned int max_queues = 0;
2251         struct netfront_queue *queue = NULL;
2252         unsigned int num_queues = 1;
2253         u8 addr[ETH_ALEN];
2254
2255         info->netdev->irq = 0;
2256
2257         /* Check if backend is trusted. */
2258         info->bounce = !xennet_trusted ||
2259                        !xenbus_read_unsigned(dev->nodename, "trusted", 1);
2260
2261         /* Check if backend supports multiple queues */
2262         max_queues = xenbus_read_unsigned(info->xbdev->otherend,
2263                                           "multi-queue-max-queues", 1);
2264         num_queues = min(max_queues, xennet_max_queues);
2265
2266         /* Check feature-split-event-channels */
2267         feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
2268                                         "feature-split-event-channels", 0);
2269
2270         /* Read mac addr. */
2271         err = xen_net_read_mac(dev, addr);
2272         if (err) {
2273                 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
2274                 goto out_unlocked;
2275         }
2276         eth_hw_addr_set(info->netdev, addr);
2277
2278         info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
2279                                                               "feature-xdp-headroom", 0);
2280         if (info->netback_has_xdp_headroom) {
2281                 /* set the current xen-netfront xdp state */
2282                 err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
2283                                           NETBACK_XDP_HEADROOM_ENABLE :
2284                                           NETBACK_XDP_HEADROOM_DISABLE);
2285                 if (err)
2286                         goto out_unlocked;
2287         }
2288
2289         rtnl_lock();
2290         if (info->queues)
2291                 xennet_destroy_queues(info);
2292
2293         /* For the case of a reconnect reset the "broken" indicator. */
2294         info->broken = false;
2295
2296         err = xennet_create_queues(info, &num_queues);
2297         if (err < 0) {
2298                 xenbus_dev_fatal(dev, err, "creating queues");
2299                 kfree(info->queues);
2300                 info->queues = NULL;
2301                 goto out;
2302         }
2303         rtnl_unlock();
2304
2305         /* Create shared ring, alloc event channel -- for each queue */
2306         for (i = 0; i < num_queues; ++i) {
2307                 queue = &info->queues[i];
2308                 err = setup_netfront(dev, queue, feature_split_evtchn);
2309                 if (err)
2310                         goto destroy_ring;
2311         }
2312
2313 again:
2314         err = xenbus_transaction_start(&xbt);
2315         if (err) {
2316                 xenbus_dev_fatal(dev, err, "starting transaction");
2317                 goto destroy_ring;
2318         }
2319
2320         if (xenbus_exists(XBT_NIL,
2321                           info->xbdev->otherend, "multi-queue-max-queues")) {
2322                 /* Write the number of queues */
2323                 err = xenbus_printf(xbt, dev->nodename,
2324                                     "multi-queue-num-queues", "%u", num_queues);
2325                 if (err) {
2326                         message = "writing multi-queue-num-queues";
2327                         goto abort_transaction_no_dev_fatal;
2328                 }
2329         }
2330
2331         if (num_queues == 1) {
2332                 err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
2333                 if (err)
2334                         goto abort_transaction_no_dev_fatal;
2335         } else {
2336                 /* Write the keys for each queue */
2337                 for (i = 0; i < num_queues; ++i) {
2338                         queue = &info->queues[i];
2339                         err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
2340                         if (err)
2341                                 goto abort_transaction_no_dev_fatal;
2342                 }
2343         }
2344
2345         /* The remaining keys are not queue-specific */
2346         err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
2347                             1);
2348         if (err) {
2349                 message = "writing request-rx-copy";
2350                 goto abort_transaction;
2351         }
2352
2353         err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
2354         if (err) {
2355                 message = "writing feature-rx-notify";
2356                 goto abort_transaction;
2357         }
2358
2359         err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
2360         if (err) {
2361                 message = "writing feature-sg";
2362                 goto abort_transaction;
2363         }
2364
2365         err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
2366         if (err) {
2367                 message = "writing feature-gso-tcpv4";
2368                 goto abort_transaction;
2369         }
2370
2371         err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
2372         if (err) {
2373                 message = "writing feature-gso-tcpv6";
2374                 goto abort_transaction;
2375         }
2376
2377         err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
2378                            "1");
2379         if (err) {
2380                 message = "writing feature-ipv6-csum-offload";
2381                 goto abort_transaction;
2382         }
2383
2384         err = xenbus_transaction_end(xbt, 0);
2385         if (err) {
2386                 if (err == -EAGAIN)
2387                         goto again;
2388                 xenbus_dev_fatal(dev, err, "completing transaction");
2389                 goto destroy_ring;
2390         }
2391
2392         return 0;
2393
2394  abort_transaction:
2395         xenbus_dev_fatal(dev, err, "%s", message);
2396 abort_transaction_no_dev_fatal:
2397         xenbus_transaction_end(xbt, 1);
2398  destroy_ring:
2399         xennet_disconnect_backend(info);
2400         rtnl_lock();
2401         xennet_destroy_queues(info);
2402  out:
2403         rtnl_unlock();
2404 out_unlocked:
2405         device_unregister(&dev->dev);
2406         return err;
2407 }
2408
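/* (Re)connect to the backend: renegotiate features and queues, register the
 * net_device on first connect, then kick every queue so transmission and Rx
 * refill resume.
 */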
2409 static int xennet_connect(struct net_device *dev)
2410 {
2411         struct netfront_info *np = netdev_priv(dev);
2412         unsigned int num_queues = 0;
2413         int err;
2414         unsigned int j = 0;
2415         struct netfront_queue *queue = NULL;
2416
2417         if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
2418                 dev_info(&dev->dev,
2419                          "backend does not support copying receive path\n");
2420                 return -ENODEV;
2421         }
2422
2423         err = talk_to_netback(np->xbdev, np);
2424         if (err)
2425                 return err;
2426         if (np->netback_has_xdp_headroom)
2427                 pr_info("backend supports XDP headroom\n");
2428         if (np->bounce)
2429                 dev_info(&np->xbdev->dev,
2430                          "bouncing transmitted data to zeroed pages\n");
2431
2432         /* talk_to_netback() sets the correct number of queues */
2433         num_queues = dev->real_num_tx_queues;
2434
2435         if (dev->reg_state == NETREG_UNINITIALIZED) {
2436                 err = register_netdev(dev);
2437                 if (err) {
2438                         pr_warn("%s: register_netdev err=%d\n", __func__, err);
2439                         device_unregister(&np->xbdev->dev);
2440                         return err;
2441                 }
2442         }
2443
2444         rtnl_lock();
2445         netdev_update_features(dev);
2446         rtnl_unlock();
2447
2448         /*
2449          * All public and private state should now be sane.  Get
2450          * ready to start sending and receiving packets and give the driver
2451          * domain a kick because we've probably just requeued some
2452          * packets.
2453          */
2454         netif_tx_lock_bh(np->netdev);
2455         netif_device_attach(np->netdev);
2456         netif_tx_unlock_bh(np->netdev);
2457
2458         netif_carrier_on(np->netdev);
2459         for (j = 0; j < num_queues; ++j) {
2460                 queue = &np->queues[j];
2461
2462                 notify_remote_via_irq(queue->tx_irq);
2463                 if (queue->tx_irq != queue->rx_irq)
2464                         notify_remote_via_irq(queue->rx_irq);
2465
2466                 spin_lock_bh(&queue->rx_lock);
2467                 xennet_alloc_rx_buffers(queue);
2468                 spin_unlock_bh(&queue->rx_lock);
2469         }
2470
2471         return 0;
2472 }
2473
2474 /*
2475  * Callback received when the backend's state changes.
2476  */
2477 static void netback_changed(struct xenbus_device *dev,
2478                             enum xenbus_state backend_state)
2479 {
2480         struct netfront_info *np = dev_get_drvdata(&dev->dev);
2481         struct net_device *netdev = np->netdev;
2482
2483         dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2484
2485         wake_up_all(&module_wq);
2486
2487         switch (backend_state) {
2488         case XenbusStateInitialising:
2489         case XenbusStateInitialised:
2490         case XenbusStateReconfiguring:
2491         case XenbusStateReconfigured:
2492         case XenbusStateUnknown:
2493                 break;
2494
2495         case XenbusStateInitWait:
2496                 if (dev->state != XenbusStateInitialising)
2497                         break;
2498                 if (xennet_connect(netdev) != 0)
2499                         break;
2500                 xenbus_switch_state(dev, XenbusStateConnected);
2501                 break;
2502
2503         case XenbusStateConnected:
2504                 netdev_notify_peers(netdev);
2505                 break;
2506
2507         case XenbusStateClosed:
2508                 if (dev->state == XenbusStateClosed)
2509                         break;
2510                 fallthrough;    /* Missed the backend's CLOSING state */
2511         case XenbusStateClosing:
2512                 xenbus_frontend_closed(dev);
2513                 break;
2514         }
2515 }
2516
2517 static const struct xennet_stat {
2518         char name[ETH_GSTRING_LEN];
2519         u16 offset;
2520 } xennet_stats[] = {
2521         {
2522                 "rx_gso_checksum_fixup",
2523                 offsetof(struct netfront_info, rx_gso_checksum_fixup)
2524         },
2525 };
2526
2527 static int xennet_get_sset_count(struct net_device *dev, int string_set)
2528 {
2529         switch (string_set) {
2530         case ETH_SS_STATS:
2531                 return ARRAY_SIZE(xennet_stats);
2532         default:
2533                 return -EINVAL;
2534         }
2535 }
2536
2537 static void xennet_get_ethtool_stats(struct net_device *dev,
2538                                      struct ethtool_stats *stats, u64 *data)
2539 {
2540         void *np = netdev_priv(dev);
2541         int i;
2542
2543         for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2544                 data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2545 }
2546
2547 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2548 {
2549         int i;
2550
2551         switch (stringset) {
2552         case ETH_SS_STATS:
2553                 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2554                         memcpy(data + i * ETH_GSTRING_LEN,
2555                                xennet_stats[i].name, ETH_GSTRING_LEN);
2556                 break;
2557         }
2558 }
2559
2560 static const struct ethtool_ops xennet_ethtool_ops =
2561 {
2562         .get_link = ethtool_op_get_link,
2563
2564         .get_sset_count = xennet_get_sset_count,
2565         .get_ethtool_stats = xennet_get_ethtool_stats,
2566         .get_strings = xennet_get_strings,
2567         .get_ts_info = ethtool_op_get_ts_info,
2568 };
2569
2570 #ifdef CONFIG_SYSFS
2571 static ssize_t show_rxbuf(struct device *dev,
2572                           struct device_attribute *attr, char *buf)
2573 {
2574         return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2575 }
2576
2577 static ssize_t store_rxbuf(struct device *dev,
2578                            struct device_attribute *attr,
2579                            const char *buf, size_t len)
2580 {
2581         char *endp;
2582
2583         if (!capable(CAP_NET_ADMIN))
2584                 return -EPERM;
2585
2586         simple_strtoul(buf, &endp, 0);
2587         if (endp == buf)
2588                 return -EBADMSG;
2589
2590         /* rxbuf_min and rxbuf_max are no longer configurable. */
2591
2592         return len;
2593 }
2594
2595 static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
2596 static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
2597 static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);
2598
2599 static struct attribute *xennet_dev_attrs[] = {
2600         &dev_attr_rxbuf_min.attr,
2601         &dev_attr_rxbuf_max.attr,
2602         &dev_attr_rxbuf_cur.attr,
2603         NULL
2604 };
2605
2606 static const struct attribute_group xennet_dev_group = {
2607         .attrs = xennet_dev_attrs
2608 };
2609 #endif /* CONFIG_SYSFS */
2610
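/* Walk the frontend through Closing and then Closed, waiting (with timeouts)
 * for the backend to follow, before the device is removed.
 */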
2611 static void xennet_bus_close(struct xenbus_device *dev)
2612 {
2613         int ret;
2614
2615         if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2616                 return;
2617         do {
2618                 xenbus_switch_state(dev, XenbusStateClosing);
2619                 ret = wait_event_timeout(module_wq,
2620                                    xenbus_read_driver_state(dev->otherend) ==
2621                                    XenbusStateClosing ||
2622                                    xenbus_read_driver_state(dev->otherend) ==
2623                                    XenbusStateClosed ||
2624                                    xenbus_read_driver_state(dev->otherend) ==
2625                                    XenbusStateUnknown,
2626                                    XENNET_TIMEOUT);
2627         } while (!ret);
2628
2629         if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2630                 return;
2631
2632         do {
2633                 xenbus_switch_state(dev, XenbusStateClosed);
2634                 ret = wait_event_timeout(module_wq,
2635                                    xenbus_read_driver_state(dev->otherend) ==
2636                                    XenbusStateClosed ||
2637                                    xenbus_read_driver_state(dev->otherend) ==
2638                                    XenbusStateUnknown,
2639                                    XENNET_TIMEOUT);
2640         } while (!ret);
2641 }
2642
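/* Device teardown: close the xenbus connection, disconnect from the backend,
 * unregister the net_device and free all per-queue state.
 */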
2643 static int xennet_remove(struct xenbus_device *dev)
2644 {
2645         struct netfront_info *info = dev_get_drvdata(&dev->dev);
2646
2647         xennet_bus_close(dev);
2648         xennet_disconnect_backend(info);
2649
2650         if (info->netdev->reg_state == NETREG_REGISTERED)
2651                 unregister_netdev(info->netdev);
2652
2653         if (info->queues) {
2654                 rtnl_lock();
2655                 xennet_destroy_queues(info);
2656                 rtnl_unlock();
2657         }
2658         xennet_free_netdev(info->netdev);
2659
2660         return 0;
2661 }
2662
2663 static const struct xenbus_device_id netfront_ids[] = {
2664         { "vif" },
2665         { "" }
2666 };
2667
2668 static struct xenbus_driver netfront_driver = {
2669         .ids = netfront_ids,
2670         .probe = netfront_probe,
2671         .remove = xennet_remove,
2672         .resume = netfront_resume,
2673         .otherend_changed = netback_changed,
2674 };
2675
2676 static int __init netif_init(void)
2677 {
2678         if (!xen_domain())
2679                 return -ENODEV;
2680
2681         if (!xen_has_pv_nic_devices())
2682                 return -ENODEV;
2683
2684         pr_info("Initialising Xen virtual ethernet driver\n");
2685
2686         /* Allow as many queues as there are CPUs but at most 8 if the user has not
2687          * specified a value.
2688          */
2689         if (xennet_max_queues == 0)
2690                 xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
2691                                           num_online_cpus());
2692
2693         return xenbus_register_frontend(&netfront_driver);
2694 }
2695 module_init(netif_init);
2696
2698 static void __exit netif_exit(void)
2699 {
2700         xenbus_unregister_driver(&netfront_driver);
2701 }
2702 module_exit(netif_exit);
2703
2704 MODULE_DESCRIPTION("Xen virtual network device frontend");
2705 MODULE_LICENSE("GPL");
2706 MODULE_ALIAS("xen:vif");
2707 MODULE_ALIAS("xennet");