1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* A network driver using virtio.
3  *
4  * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
5  */
6 //#define DEBUG
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/module.h>
11 #include <linux/virtio.h>
12 #include <linux/virtio_net.h>
13 #include <linux/bpf.h>
14 #include <linux/bpf_trace.h>
15 #include <linux/scatterlist.h>
16 #include <linux/if_vlan.h>
17 #include <linux/slab.h>
18 #include <linux/cpu.h>
19 #include <linux/average.h>
20 #include <linux/filter.h>
21 #include <linux/kernel.h>
22 #include <net/route.h>
23 #include <net/xdp.h>
24 #include <net/net_failover.h>
25 #include <net/netdev_rx_queue.h>
26
27 static int napi_weight = NAPI_POLL_WEIGHT;
28 module_param(napi_weight, int, 0444);
29
30 static bool csum = true, gso = true, napi_tx = true;
31 module_param(csum, bool, 0444);
32 module_param(gso, bool, 0444);
33 module_param(napi_tx, bool, 0644);
34
35 /* FIXME: MTU in config. */
36 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
37 #define GOOD_COPY_LEN   128
38
39 #define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
40
41 /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
42 #define VIRTIO_XDP_HEADROOM 256
43
44 /* Separating two types of XDP xmit */
45 #define VIRTIO_XDP_TX           BIT(0)
46 #define VIRTIO_XDP_REDIR        BIT(1)
47
48 #define VIRTIO_XDP_FLAG BIT(0)
49
50 /* RX packet size EWMA. The average packet size is used to determine the packet
51  * buffer size when refilling RX rings. As the entire RX ring may be refilled
52  * at once, the weight is chosen so that the EWMA will be insensitive to short-
53  * term, transient changes in packet size.
54  */
55 DECLARE_EWMA(pkt_len, 0, 64)
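
/*
 * A minimal sketch (not part of the driver, kept under #if 0) of how the
 * pkt_len EWMA declared above is meant to be used; the ewma_pkt_len_*()
 * helpers are generated by DECLARE_EWMA(). Values are illustrative only.
 */
#if 0
static void pkt_len_ewma_example(struct ewma_pkt_len *avg)
{
	ewma_pkt_len_init(avg);		/* start with no history */
	ewma_pkt_len_add(avg, 1514);	/* fold in an observed packet length */
	ewma_pkt_len_add(avg, 66);	/* short packets pull the average down slowly */
	pr_debug("avg pkt len ~%lu\n", ewma_pkt_len_read(avg));
}
#endif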
56
57 #define VIRTNET_DRIVER_VERSION "1.0.0"
58
59 static const unsigned long guest_offloads[] = {
60         VIRTIO_NET_F_GUEST_TSO4,
61         VIRTIO_NET_F_GUEST_TSO6,
62         VIRTIO_NET_F_GUEST_ECN,
63         VIRTIO_NET_F_GUEST_UFO,
64         VIRTIO_NET_F_GUEST_CSUM,
65         VIRTIO_NET_F_GUEST_USO4,
66         VIRTIO_NET_F_GUEST_USO6,
67         VIRTIO_NET_F_GUEST_HDRLEN
68 };
69
70 #define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
71                                 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
72                                 (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
73                                 (1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
74                                 (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
75                                 (1ULL << VIRTIO_NET_F_GUEST_USO6))
76
77 struct virtnet_stat_desc {
78         char desc[ETH_GSTRING_LEN];
79         size_t offset;
80 };
81
82 struct virtnet_sq_stats {
83         struct u64_stats_sync syncp;
84         u64 packets;
85         u64 bytes;
86         u64 xdp_tx;
87         u64 xdp_tx_drops;
88         u64 kicks;
89         u64 tx_timeouts;
90 };
91
92 struct virtnet_rq_stats {
93         struct u64_stats_sync syncp;
94         u64 packets;
95         u64 bytes;
96         u64 drops;
97         u64 xdp_packets;
98         u64 xdp_tx;
99         u64 xdp_redirects;
100         u64 xdp_drops;
101         u64 kicks;
102 };
103
104 #define VIRTNET_SQ_STAT(m)      offsetof(struct virtnet_sq_stats, m)
105 #define VIRTNET_RQ_STAT(m)      offsetof(struct virtnet_rq_stats, m)
106
107 static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
108         { "packets",            VIRTNET_SQ_STAT(packets) },
109         { "bytes",              VIRTNET_SQ_STAT(bytes) },
110         { "xdp_tx",             VIRTNET_SQ_STAT(xdp_tx) },
111         { "xdp_tx_drops",       VIRTNET_SQ_STAT(xdp_tx_drops) },
112         { "kicks",              VIRTNET_SQ_STAT(kicks) },
113         { "tx_timeouts",        VIRTNET_SQ_STAT(tx_timeouts) },
114 };
115
116 static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
117         { "packets",            VIRTNET_RQ_STAT(packets) },
118         { "bytes",              VIRTNET_RQ_STAT(bytes) },
119         { "drops",              VIRTNET_RQ_STAT(drops) },
120         { "xdp_packets",        VIRTNET_RQ_STAT(xdp_packets) },
121         { "xdp_tx",             VIRTNET_RQ_STAT(xdp_tx) },
122         { "xdp_redirects",      VIRTNET_RQ_STAT(xdp_redirects) },
123         { "xdp_drops",          VIRTNET_RQ_STAT(xdp_drops) },
124         { "kicks",              VIRTNET_RQ_STAT(kicks) },
125 };
126
127 #define VIRTNET_SQ_STATS_LEN    ARRAY_SIZE(virtnet_sq_stats_desc)
128 #define VIRTNET_RQ_STATS_LEN    ARRAY_SIZE(virtnet_rq_stats_desc)
129
130 struct virtnet_interrupt_coalesce {
131         u32 max_packets;
132         u32 max_usecs;
133 };
134
135 /* DMA info stored at the head of each page used for rx buffer allocation. */
136 struct virtnet_rq_dma {
137         dma_addr_t addr;
138         u32 ref;
139         u16 len;
140         u16 need_sync;
141 };
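
/*
 * A sketch of the intended layout, assuming the refill path below: this
 * struct sits at the head of each page_frag page and the receive buffers
 * are carved out of the remainder, all sharing one DMA mapping:
 *
 *   +------------------------+--------+--------+-----
 *   | struct virtnet_rq_dma  | buf #0 | buf #1 | ...
 *   +------------------------+--------+--------+-----
 *   ^ page_address(page)       alloc_frag->offset advances to the right
 */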
142
143 /* Internal representation of a send virtqueue */
144 struct send_queue {
145         /* Virtqueue associated with this send queue */
146         struct virtqueue *vq;
147
148         /* TX: fragments + linear part + virtio header */
149         struct scatterlist sg[MAX_SKB_FRAGS + 2];
150
151         /* Name of the send queue: output.$index */
152         char name[16];
153
154         struct virtnet_sq_stats stats;
155
156         struct virtnet_interrupt_coalesce intr_coal;
157
158         struct napi_struct napi;
159
160         /* Record whether sq is in reset state. */
161         bool reset;
162 };
163
164 /* Internal representation of a receive virtqueue */
165 struct receive_queue {
166         /* Virtqueue associated with this receive_queue */
167         struct virtqueue *vq;
168
169         struct napi_struct napi;
170
171         struct bpf_prog __rcu *xdp_prog;
172
173         struct virtnet_rq_stats stats;
174
175         struct virtnet_interrupt_coalesce intr_coal;
176
177         /* Chain pages by the private ptr. */
178         struct page *pages;
179
180         /* Average packet length for mergeable receive buffers. */
181         struct ewma_pkt_len mrg_avg_pkt_len;
182
183         /* Page frag for packet buffer allocation. */
184         struct page_frag alloc_frag;
185
186         /* RX: fragments + linear part + virtio header */
187         struct scatterlist sg[MAX_SKB_FRAGS + 2];
188
189         /* Min single buffer size for mergeable buffers case. */
190         unsigned int min_buf_len;
191
192         /* Name of this receive queue: input.$index */
193         char name[16];
194
195         struct xdp_rxq_info xdp_rxq;
196
197         /* Record the last dma info to free after a new page is allocated. */
198         struct virtnet_rq_dma *last_dma;
199
200         /* Does the driver do the DMA mapping itself? (premapped rx buffers) */
201         bool do_dma;
202 };
203
204 /* This structure can hold an RSS message with the maximum settings for the
205  * indirection table and key size. Note that the default structure describing the
206  * RSS configuration, virtio_net_rss_config, carries the same info but cannot
207  * hold the table values. In any case, the structure is passed to the virtio hw
208  * through sg_buf, split into parts because table sizes may differ with the
209  * device configuration. */
210 #define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
211 #define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
212 struct virtio_net_ctrl_rss {
213         u32 hash_types;
214         u16 indirection_table_mask;
215         u16 unclassified_queue;
216         u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
217         u16 max_tx_vq;
218         u8 hash_key_length;
219         u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
220 };
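
/*
 * A hedged sketch (not compiled, kept under #if 0) of how the structure
 * above could be split into scatterlist parts around the variable-sized
 * indirection table and key, as the comment above describes. The 4-part
 * split and the helper name are assumptions for illustration; the real
 * control-virtqueue path may differ.
 */
#if 0
static void virtnet_rss_sg_split_example(struct virtio_net_ctrl_rss *rss,
					 struct scatterlist sgs[4],
					 u16 key_size)
{
	sg_init_table(sgs, 4);
	/* hash_types .. unclassified_queue */
	sg_set_buf(&sgs[0], rss,
		   offsetof(struct virtio_net_ctrl_rss, indirection_table));
	/* only the table entries the device actually uses */
	sg_set_buf(&sgs[1], rss->indirection_table,
		   sizeof(u16) * (rss->indirection_table_mask + 1));
	/* max_tx_vq and hash_key_length */
	sg_set_buf(&sgs[2], &rss->max_tx_vq,
		   offsetof(struct virtio_net_ctrl_rss, key) -
		   offsetof(struct virtio_net_ctrl_rss, max_tx_vq));
	/* only the key bytes the device expects */
	sg_set_buf(&sgs[3], rss->key, key_size);
}
#endif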
221
222 /* Control VQ buffers: protected by the rtnl lock */
223 struct control_buf {
224         struct virtio_net_ctrl_hdr hdr;
225         virtio_net_ctrl_ack status;
226         struct virtio_net_ctrl_mq mq;
227         u8 promisc;
228         u8 allmulti;
229         __virtio16 vid;
230         __virtio64 offloads;
231         struct virtio_net_ctrl_rss rss;
232         struct virtio_net_ctrl_coal_tx coal_tx;
233         struct virtio_net_ctrl_coal_rx coal_rx;
234         struct virtio_net_ctrl_coal_vq coal_vq;
235 };
236
237 struct virtnet_info {
238         struct virtio_device *vdev;
239         struct virtqueue *cvq;
240         struct net_device *dev;
241         struct send_queue *sq;
242         struct receive_queue *rq;
243         unsigned int status;
244
245         /* Max # of queue pairs supported by the device */
246         u16 max_queue_pairs;
247
248         /* # of queue pairs currently used by the driver */
249         u16 curr_queue_pairs;
250
251         /* # of XDP queue pairs currently used by the driver */
252         u16 xdp_queue_pairs;
253
254         /* xdp_queue_pairs may be 0 even when an XDP program is loaded (TX queues are then shared), so track XDP state separately. */
255         bool xdp_enabled;
256
257         /* I like... big packets and I cannot lie! */
258         bool big_packets;
259
260         /* number of sg entries allocated for big packets */
261         unsigned int big_packets_num_skbfrags;
262
263         /* Host will merge rx buffers for big packets (shake it! shake it!) */
264         bool mergeable_rx_bufs;
265
266         /* Host supports rss and/or hash report */
267         bool has_rss;
268         bool has_rss_hash_report;
269         u8 rss_key_size;
270         u16 rss_indir_table_size;
271         u32 rss_hash_types_supported;
272         u32 rss_hash_types_saved;
273
274         /* Has control virtqueue */
275         bool has_cvq;
276
277         /* Host can handle any s/g split between our header and packet data */
278         bool any_header_sg;
279
280         /* Packet virtio header size */
281         u8 hdr_len;
282
283         /* Work struct for delayed refilling if we run low on memory. */
284         struct delayed_work refill;
285
286         /* Is delayed refill enabled? */
287         bool refill_enabled;
288
289         /* The lock to synchronize the access to refill_enabled */
290         spinlock_t refill_lock;
291
292         /* Work struct for config space updates */
293         struct work_struct config_work;
294
295         /* Is the affinity hint set for virtqueues? */
296         bool affinity_hint_set;
297
298         /* CPU hotplug instances for online & dead */
299         struct hlist_node node;
300         struct hlist_node node_dead;
301
302         struct control_buf *ctrl;
303
304         /* Ethtool settings */
305         u8 duplex;
306         u32 speed;
307
308         /* Interrupt coalescing settings */
309         struct virtnet_interrupt_coalesce intr_coal_tx;
310         struct virtnet_interrupt_coalesce intr_coal_rx;
311
312         unsigned long guest_offloads;
313         unsigned long guest_offloads_capable;
314
315         /* failover when STANDBY feature enabled */
316         struct failover *failover;
317 };
318
319 struct padded_vnet_hdr {
320         struct virtio_net_hdr_v1_hash hdr;
321         /*
322          * hdr is in a separate sg buffer, and data sg buffer shares same page
323          * with this header sg. This padding makes next sg 16 byte aligned
324          * after the header.
325          */
326         char padding[12];
327 };
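
/*
 * Worked example (assuming the current 20-byte virtio_net_hdr_v1_hash):
 * 20 bytes of header plus the 12 bytes of padding above give 32 bytes,
 * so the data sg entry that follows in the same page starts on a
 * 16-byte boundary.
 */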
328
329 struct virtio_net_common_hdr {
330         union {
331                 struct virtio_net_hdr hdr;
332                 struct virtio_net_hdr_mrg_rxbuf mrg_hdr;
333                 struct virtio_net_hdr_v1_hash hash_v1_hdr;
334         };
335 };
336
337 static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
338 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
339
340 static bool is_xdp_frame(void *ptr)
341 {
342         return (unsigned long)ptr & VIRTIO_XDP_FLAG;
343 }
344
345 static void *xdp_to_ptr(struct xdp_frame *ptr)
346 {
347         return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
348 }
349
350 static struct xdp_frame *ptr_to_xdp(void *ptr)
351 {
352         return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
353 }
354
355 /* Converting between virtqueue no. and kernel tx/rx queue no.
356  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
357  */
358 static int vq2txq(struct virtqueue *vq)
359 {
360         return (vq->index - 1) / 2;
361 }
362
363 static int txq2vq(int txq)
364 {
365         return txq * 2 + 1;
366 }
367
368 static int vq2rxq(struct virtqueue *vq)
369 {
370         return vq->index / 2;
371 }
372
373 static int rxq2vq(int rxq)
374 {
375         return rxq * 2;
376 }
377
378 static inline struct virtio_net_common_hdr *
379 skb_vnet_common_hdr(struct sk_buff *skb)
380 {
381         return (struct virtio_net_common_hdr *)skb->cb;
382 }
383
384 /*
385  * page->private is used to chain pages for big packets; put the whole
386  * most recently used list at the beginning for reuse.
387  */
388 static void give_pages(struct receive_queue *rq, struct page *page)
389 {
390         struct page *end;
391
392         /* Find end of list, sew whole thing into vi->rq.pages. */
393         for (end = page; end->private; end = (struct page *)end->private);
394         end->private = (unsigned long)rq->pages;
395         rq->pages = page;
396 }
397
398 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
399 {
400         struct page *p = rq->pages;
401
402         if (p) {
403                 rq->pages = (struct page *)p->private;
404                 /* clear private here, it is used to chain pages */
405                 p->private = 0;
406         } else
407                 p = alloc_page(gfp_mask);
408         return p;
409 }
410
411 static void enable_delayed_refill(struct virtnet_info *vi)
412 {
413         spin_lock_bh(&vi->refill_lock);
414         vi->refill_enabled = true;
415         spin_unlock_bh(&vi->refill_lock);
416 }
417
418 static void disable_delayed_refill(struct virtnet_info *vi)
419 {
420         spin_lock_bh(&vi->refill_lock);
421         vi->refill_enabled = false;
422         spin_unlock_bh(&vi->refill_lock);
423 }
424
425 static void virtqueue_napi_schedule(struct napi_struct *napi,
426                                     struct virtqueue *vq)
427 {
428         if (napi_schedule_prep(napi)) {
429                 virtqueue_disable_cb(vq);
430                 __napi_schedule(napi);
431         }
432 }
433
434 static void virtqueue_napi_complete(struct napi_struct *napi,
435                                     struct virtqueue *vq, int processed)
436 {
437         int opaque;
438
439         opaque = virtqueue_enable_cb_prepare(vq);
440         if (napi_complete_done(napi, processed)) {
441                 if (unlikely(virtqueue_poll(vq, opaque)))
442                         virtqueue_napi_schedule(napi, vq);
443         } else {
444                 virtqueue_disable_cb(vq);
445         }
446 }
447
448 static void skb_xmit_done(struct virtqueue *vq)
449 {
450         struct virtnet_info *vi = vq->vdev->priv;
451         struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
452
453         /* Suppress further interrupts. */
454         virtqueue_disable_cb(vq);
455
456         if (napi->weight)
457                 virtqueue_napi_schedule(napi, vq);
458         else
459                 /* We were probably waiting for more output buffers. */
460                 netif_wake_subqueue(vi->dev, vq2txq(vq));
461 }
462
463 #define MRG_CTX_HEADER_SHIFT 22
464 static void *mergeable_len_to_ctx(unsigned int truesize,
465                                   unsigned int headroom)
466 {
467         return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
468 }
469
470 static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
471 {
472         return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
473 }
474
475 static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
476 {
477         return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
478 }
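
/*
 * A minimal sketch (not compiled, kept under #if 0) of the context encoding
 * above: truesize lives in the low 22 bits, headroom in the bits above.
 * The example values are arbitrary.
 */
#if 0
static void mrg_ctx_example(void)
{
	void *ctx = mergeable_len_to_ctx(1536, 256);

	WARN_ON(mergeable_ctx_to_truesize(ctx) != 1536);
	WARN_ON(mergeable_ctx_to_headroom(ctx) != 256);
}
#endif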
479
480 static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
481                                          unsigned int headroom,
482                                          unsigned int len)
483 {
484         struct sk_buff *skb;
485
486         skb = build_skb(buf, buflen);
487         if (unlikely(!skb))
488                 return NULL;
489
490         skb_reserve(skb, headroom);
491         skb_put(skb, len);
492
493         return skb;
494 }
495
496 /* Called from bottom half context */
497 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
498                                    struct receive_queue *rq,
499                                    struct page *page, unsigned int offset,
500                                    unsigned int len, unsigned int truesize,
501                                    unsigned int headroom)
502 {
503         struct sk_buff *skb;
504         struct virtio_net_common_hdr *hdr;
505         unsigned int copy, hdr_len, hdr_padded_len;
506         struct page *page_to_free = NULL;
507         int tailroom, shinfo_size;
508         char *p, *hdr_p, *buf;
509
510         p = page_address(page) + offset;
511         hdr_p = p;
512
513         hdr_len = vi->hdr_len;
514         if (vi->mergeable_rx_bufs)
515                 hdr_padded_len = hdr_len;
516         else
517                 hdr_padded_len = sizeof(struct padded_vnet_hdr);
518
519         buf = p - headroom;
520         len -= hdr_len;
521         offset += hdr_padded_len;
522         p += hdr_padded_len;
523         tailroom = truesize - headroom  - hdr_padded_len - len;
524
525         shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
526
527         /* For larger packets, build the skb around the existing buffer (no copy) when the shared info fits in the tailroom. */
528         if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
529                 skb = virtnet_build_skb(buf, truesize, p - buf, len);
530                 if (unlikely(!skb))
531                         return NULL;
532
533                 page = (struct page *)page->private;
534                 if (page)
535                         give_pages(rq, page);
536                 goto ok;
537         }
538
539         /* copy small packet so we can reuse these pages for small data */
540         skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
541         if (unlikely(!skb))
542                 return NULL;
543
544         /* Copy the whole frame if it fits in skb->head; otherwise
545          * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
546          */
547         if (len <= skb_tailroom(skb))
548                 copy = len;
549         else
550                 copy = ETH_HLEN;
551         skb_put_data(skb, p, copy);
552
553         len -= copy;
554         offset += copy;
555
556         if (vi->mergeable_rx_bufs) {
557                 if (len)
558                         skb_add_rx_frag(skb, 0, page, offset, len, truesize);
559                 else
560                         page_to_free = page;
561                 goto ok;
562         }
563
564         /*
565          * Verify that we can indeed put this data into a skb.
566          * This is here to handle cases when the device erroneously
567          * tries to receive more than is possible. This is usually
568          * the case of a broken device.
569          */
570         if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
571                 net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
572                 dev_kfree_skb(skb);
573                 return NULL;
574         }
575         BUG_ON(offset >= PAGE_SIZE);
576         while (len) {
577                 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
578                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
579                                 frag_size, truesize);
580                 len -= frag_size;
581                 page = (struct page *)page->private;
582                 offset = 0;
583         }
584
585         if (page)
586                 give_pages(rq, page);
587
588 ok:
589         hdr = skb_vnet_common_hdr(skb);
590         memcpy(hdr, hdr_p, hdr_len);
591         if (page_to_free)
592                 put_page(page_to_free);
593
594         return skb;
595 }
596
597 static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
598 {
599         struct page *page = virt_to_head_page(buf);
600         struct virtnet_rq_dma *dma;
601         void *head;
602         int offset;
603
604         head = page_address(page);
605
606         dma = head;
607
608         --dma->ref;
609
610         if (dma->ref) {
611                 if (dma->need_sync && len) {
612                         offset = buf - (head + sizeof(*dma));
613
614                         virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr, offset,
615                                                                 len, DMA_FROM_DEVICE);
616                 }
617
618                 return;
619         }
620
621         virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
622                                          DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
623         put_page(page);
624 }
625
626 static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
627 {
628         void *buf;
629
630         buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
631         if (buf && rq->do_dma)
632                 virtnet_rq_unmap(rq, buf, *len);
633
634         return buf;
635 }
636
637 static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
638 {
639         void *buf;
640
641         buf = virtqueue_detach_unused_buf(rq->vq);
642         if (buf && rq->do_dma)
643                 virtnet_rq_unmap(rq, buf, 0);
644
645         return buf;
646 }
647
648 static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
649 {
650         struct virtnet_rq_dma *dma;
651         dma_addr_t addr;
652         u32 offset;
653         void *head;
654
655         if (!rq->do_dma) {
656                 sg_init_one(rq->sg, buf, len);
657                 return;
658         }
659
660         head = page_address(rq->alloc_frag.page);
661
662         offset = buf - head;
663
664         dma = head;
665
666         addr = dma->addr - sizeof(*dma) + offset;
667
668         sg_init_table(rq->sg, 1);
669         rq->sg[0].dma_address = addr;
670         rq->sg[0].length = len;
671 }
672
673 static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
674 {
675         struct page_frag *alloc_frag = &rq->alloc_frag;
676         struct virtnet_rq_dma *dma;
677         void *buf, *head;
678         dma_addr_t addr;
679
680         if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
681                 return NULL;
682
683         head = page_address(alloc_frag->page);
684
685         if (rq->do_dma) {
686                 dma = head;
687
688                 /* new pages */
689                 if (!alloc_frag->offset) {
690                         if (rq->last_dma) {
691                                 /* Now that the new page is allocated, the old
692                                  * dma mapping will not be used again, so it can
693                                  * be unmapped once its ref drops to 0.
694                                  */
695                                 virtnet_rq_unmap(rq, rq->last_dma, 0);
696                                 rq->last_dma = NULL;
697                         }
698
699                         dma->len = alloc_frag->size - sizeof(*dma);
700
701                         addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
702                                                               dma->len, DMA_FROM_DEVICE, 0);
703                         if (virtqueue_dma_mapping_error(rq->vq, addr))
704                                 return NULL;
705
706                         dma->addr = addr;
707                         dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
708
709                         /* Add a reference to dma to prevent the entire dma from
710                          * being released during error handling. This reference
711                          * will be freed after the pages are no longer used.
712                          */
713                         get_page(alloc_frag->page);
714                         dma->ref = 1;
715                         alloc_frag->offset = sizeof(*dma);
716
717                         rq->last_dma = dma;
718                 }
719
720                 ++dma->ref;
721         }
722
723         buf = head + alloc_frag->offset;
724
725         get_page(alloc_frag->page);
726         alloc_frag->offset += size;
727
728         return buf;
729 }
730
731 static void virtnet_rq_set_premapped(struct virtnet_info *vi)
732 {
733         int i;
734
735         /* disable for big mode */
736         if (!vi->mergeable_rx_bufs && vi->big_packets)
737                 return;
738
739         for (i = 0; i < vi->max_queue_pairs; i++) {
740                 if (virtqueue_set_dma_premapped(vi->rq[i].vq))
741                         continue;
742
743                 vi->rq[i].do_dma = true;
744         }
745 }
746
747 static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
748 {
749         unsigned int len;
750         unsigned int packets = 0;
751         unsigned int bytes = 0;
752         void *ptr;
753
754         while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
755                 if (likely(!is_xdp_frame(ptr))) {
756                         struct sk_buff *skb = ptr;
757
758                         pr_debug("Sent skb %p\n", skb);
759
760                         bytes += skb->len;
761                         napi_consume_skb(skb, in_napi);
762                 } else {
763                         struct xdp_frame *frame = ptr_to_xdp(ptr);
764
765                         bytes += xdp_get_frame_len(frame);
766                         xdp_return_frame(frame);
767                 }
768                 packets++;
769         }
770
771         /* Avoid overhead when no packets have been processed; this
772          * happens when called speculatively from start_xmit.
773          */
774         if (!packets)
775                 return;
776
777         u64_stats_update_begin(&sq->stats.syncp);
778         sq->stats.bytes += bytes;
779         sq->stats.packets += packets;
780         u64_stats_update_end(&sq->stats.syncp);
781 }
782
783 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
784 {
785         if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
786                 return false;
787         else if (q < vi->curr_queue_pairs)
788                 return true;
789         else
790                 return false;
791 }
792
793 static void check_sq_full_and_disable(struct virtnet_info *vi,
794                                       struct net_device *dev,
795                                       struct send_queue *sq)
796 {
797         bool use_napi = sq->napi.weight;
798         int qnum;
799
800         qnum = sq - vi->sq;
801
802         /* If running out of space, stop queue to avoid getting packets that we
803          * are then unable to transmit.
804          * An alternative would be to force queuing layer to requeue the skb by
805          * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
806          * returned in a normal path of operation: it means that driver is not
807          * maintaining the TX queue stop/start state properly, and causes
808          * the stack to do a non-trivial amount of useless work.
809          * Since most packets only take 1 or 2 ring slots, stopping the queue
810          * early means 16 slots are typically wasted.
811          */
812         if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
813                 netif_stop_subqueue(dev, qnum);
814                 if (use_napi) {
815                         if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
816                                 virtqueue_napi_schedule(&sq->napi, sq->vq);
817                 } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
818                         /* More just got used, free them then recheck. */
819                         free_old_xmit_skbs(sq, false);
820                         if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
821                                 netif_start_subqueue(dev, qnum);
822                                 virtqueue_disable_cb(sq->vq);
823                         }
824                 }
825         }
826 }
827
828 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
829                                    struct send_queue *sq,
830                                    struct xdp_frame *xdpf)
831 {
832         struct virtio_net_hdr_mrg_rxbuf *hdr;
833         struct skb_shared_info *shinfo;
834         u8 nr_frags = 0;
835         int err, i;
836
837         if (unlikely(xdpf->headroom < vi->hdr_len))
838                 return -EOVERFLOW;
839
840         if (unlikely(xdp_frame_has_frags(xdpf))) {
841                 shinfo = xdp_get_shared_info_from_frame(xdpf);
842                 nr_frags = shinfo->nr_frags;
843         }
844
845         /* In the wrapping function virtnet_xdp_xmit(), we need to free
846          * up the pending old buffers; there xdp_get_frame_len() and
847          * xdp_return_frame() calculate the position of skb_shared_info
848          * from xdpf->data and xdpf->headroom. Therefore, we need to
849          * update the value of headroom synchronously here so that both
850          * paths see a consistent frame layout.
851          */
852         xdpf->headroom -= vi->hdr_len;
853         xdpf->data -= vi->hdr_len;
854         /* Zero header and leave csum up to XDP layers */
855         hdr = xdpf->data;
856         memset(hdr, 0, vi->hdr_len);
857         xdpf->len   += vi->hdr_len;
858
859         sg_init_table(sq->sg, nr_frags + 1);
860         sg_set_buf(sq->sg, xdpf->data, xdpf->len);
861         for (i = 0; i < nr_frags; i++) {
862                 skb_frag_t *frag = &shinfo->frags[i];
863
864                 sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
865                             skb_frag_size(frag), skb_frag_off(frag));
866         }
867
868         err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
869                                    xdp_to_ptr(xdpf), GFP_ATOMIC);
870         if (unlikely(err))
871                 return -ENOSPC; /* Caller handle free/refcnt */
872
873         return 0;
874 }
875
876 /* When vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for XDP tx on
877  * the current cpu, so it does not need to be locked.
878  *
879  * Here we use a macro instead of inline functions because we have to deal with
880  * three issues at the same time: 1. the choice of sq, 2. deciding whether to
881  * lock/unlock the txq, and 3. making sparse happy. It is difficult for two
882  * inline functions to solve these three problems at the same time.
883  */
884 #define virtnet_xdp_get_sq(vi) ({                                       \
885         int cpu = smp_processor_id();                                   \
886         struct netdev_queue *txq;                                       \
887         typeof(vi) v = (vi);                                            \
888         unsigned int qp;                                                \
889                                                                         \
890         if (v->curr_queue_pairs > nr_cpu_ids) {                         \
891                 qp = v->curr_queue_pairs - v->xdp_queue_pairs;          \
892                 qp += cpu;                                              \
893                 txq = netdev_get_tx_queue(v->dev, qp);                  \
894                 __netif_tx_acquire(txq);                                \
895         } else {                                                        \
896                 qp = cpu % v->curr_queue_pairs;                         \
897                 txq = netdev_get_tx_queue(v->dev, qp);                  \
898                 __netif_tx_lock(txq, cpu);                              \
899         }                                                               \
900         v->sq + qp;                                                     \
901 })
902
903 #define virtnet_xdp_put_sq(vi, q) {                                     \
904         struct netdev_queue *txq;                                       \
905         typeof(vi) v = (vi);                                            \
906                                                                         \
907         txq = netdev_get_tx_queue(v->dev, (q) - v->sq);                 \
908         if (v->curr_queue_pairs > nr_cpu_ids)                           \
909                 __netif_tx_release(txq);                                \
910         else                                                            \
911                 __netif_tx_unlock(txq);                                 \
912 }
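
/*
 * A minimal sketch (not compiled, kept under #if 0) of the intended pairing
 * of the two macros above around an XDP transmit; virtnet_xdp_xmit() below
 * is the real user. The function name is illustrative only.
 */
#if 0
static void virtnet_xdp_sq_usage_example(struct virtnet_info *vi)
{
	struct send_queue *sq = virtnet_xdp_get_sq(vi);

	/* ... add xdp_frames to sq->vq and kick it ... */

	virtnet_xdp_put_sq(vi, sq);
}
#endif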
913
914 static int virtnet_xdp_xmit(struct net_device *dev,
915                             int n, struct xdp_frame **frames, u32 flags)
916 {
917         struct virtnet_info *vi = netdev_priv(dev);
918         struct receive_queue *rq = vi->rq;
919         struct bpf_prog *xdp_prog;
920         struct send_queue *sq;
921         unsigned int len;
922         int packets = 0;
923         int bytes = 0;
924         int nxmit = 0;
925         int kicks = 0;
926         void *ptr;
927         int ret;
928         int i;
929
930         /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
931          * indicates XDP resources have been successfully allocated.
932          */
933         xdp_prog = rcu_access_pointer(rq->xdp_prog);
934         if (!xdp_prog)
935                 return -ENXIO;
936
937         sq = virtnet_xdp_get_sq(vi);
938
939         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
940                 ret = -EINVAL;
941                 goto out;
942         }
943
944         /* Free up any pending old buffers before queueing new ones. */
945         while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
946                 if (likely(is_xdp_frame(ptr))) {
947                         struct xdp_frame *frame = ptr_to_xdp(ptr);
948
949                         bytes += xdp_get_frame_len(frame);
950                         xdp_return_frame(frame);
951                 } else {
952                         struct sk_buff *skb = ptr;
953
954                         bytes += skb->len;
955                         napi_consume_skb(skb, false);
956                 }
957                 packets++;
958         }
959
960         for (i = 0; i < n; i++) {
961                 struct xdp_frame *xdpf = frames[i];
962
963                 if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
964                         break;
965                 nxmit++;
966         }
967         ret = nxmit;
968
969         if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
970                 check_sq_full_and_disable(vi, dev, sq);
971
972         if (flags & XDP_XMIT_FLUSH) {
973                 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
974                         kicks = 1;
975         }
976 out:
977         u64_stats_update_begin(&sq->stats.syncp);
978         sq->stats.bytes += bytes;
979         sq->stats.packets += packets;
980         sq->stats.xdp_tx += n;
981         sq->stats.xdp_tx_drops += n - nxmit;
982         sq->stats.kicks += kicks;
983         u64_stats_update_end(&sq->stats.syncp);
984
985         virtnet_xdp_put_sq(vi, sq);
986         return ret;
987 }
988
989 static void put_xdp_frags(struct xdp_buff *xdp)
990 {
991         struct skb_shared_info *shinfo;
992         struct page *xdp_page;
993         int i;
994
995         if (xdp_buff_has_frags(xdp)) {
996                 shinfo = xdp_get_shared_info_from_buff(xdp);
997                 for (i = 0; i < shinfo->nr_frags; i++) {
998                         xdp_page = skb_frag_page(&shinfo->frags[i]);
999                         put_page(xdp_page);
1000                 }
1001         }
1002 }
1003
1004 static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
1005                                struct net_device *dev,
1006                                unsigned int *xdp_xmit,
1007                                struct virtnet_rq_stats *stats)
1008 {
1009         struct xdp_frame *xdpf;
1010         int err;
1011         u32 act;
1012
1013         act = bpf_prog_run_xdp(xdp_prog, xdp);
1014         stats->xdp_packets++;
1015
1016         switch (act) {
1017         case XDP_PASS:
1018                 return act;
1019
1020         case XDP_TX:
1021                 stats->xdp_tx++;
1022                 xdpf = xdp_convert_buff_to_frame(xdp);
1023                 if (unlikely(!xdpf)) {
1024                         netdev_dbg(dev, "convert buff to frame failed for xdp\n");
1025                         return XDP_DROP;
1026                 }
1027
1028                 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
1029                 if (unlikely(!err)) {
1030                         xdp_return_frame_rx_napi(xdpf);
1031                 } else if (unlikely(err < 0)) {
1032                         trace_xdp_exception(dev, xdp_prog, act);
1033                         return XDP_DROP;
1034                 }
1035                 *xdp_xmit |= VIRTIO_XDP_TX;
1036                 return act;
1037
1038         case XDP_REDIRECT:
1039                 stats->xdp_redirects++;
1040                 err = xdp_do_redirect(dev, xdp, xdp_prog);
1041                 if (err)
1042                         return XDP_DROP;
1043
1044                 *xdp_xmit |= VIRTIO_XDP_REDIR;
1045                 return act;
1046
1047         default:
1048                 bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
1049                 fallthrough;
1050         case XDP_ABORTED:
1051                 trace_xdp_exception(dev, xdp_prog, act);
1052                 fallthrough;
1053         case XDP_DROP:
1054                 return XDP_DROP;
1055         }
1056 }
1057
1058 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
1059 {
1060         return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
1061 }
1062
1063 /* We copy the packet for XDP in the following cases:
1064  *
1065  * 1) Packet is scattered across multiple rx buffers.
1066  * 2) Headroom space is insufficient.
1067  *
1068  * This is inefficient but it's a temporary condition that
1069  * we hit right after XDP is enabled and until the queue is refilled
1070  * with large buffers with sufficient headroom - so it should affect
1071  * at most a queue's worth of packets.
1072  * Afterwards, the conditions to enable
1073  * XDP should preclude the underlying device from sending packets
1074  * across multiple buffers (num_buf > 1), and we make sure buffers
1075  * have enough headroom.
1076  */
1077 static struct page *xdp_linearize_page(struct receive_queue *rq,
1078                                        int *num_buf,
1079                                        struct page *p,
1080                                        int offset,
1081                                        int page_off,
1082                                        unsigned int *len)
1083 {
1084         int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1085         struct page *page;
1086
1087         if (page_off + *len + tailroom > PAGE_SIZE)
1088                 return NULL;
1089
1090         page = alloc_page(GFP_ATOMIC);
1091         if (!page)
1092                 return NULL;
1093
1094         memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
1095         page_off += *len;
1096
1097         while (--*num_buf) {
1098                 unsigned int buflen;
1099                 void *buf;
1100                 int off;
1101
1102                 buf = virtnet_rq_get_buf(rq, &buflen, NULL);
1103                 if (unlikely(!buf))
1104                         goto err_buf;
1105
1106                 p = virt_to_head_page(buf);
1107                 off = buf - page_address(p);
1108
1109                 /* guard against a misconfigured or uncooperative backend that
1110                  * is sending packets larger than the MTU.
1111                  */
1112                 if ((page_off + buflen + tailroom) > PAGE_SIZE) {
1113                         put_page(p);
1114                         goto err_buf;
1115                 }
1116
1117                 memcpy(page_address(page) + page_off,
1118                        page_address(p) + off, buflen);
1119                 page_off += buflen;
1120                 put_page(p);
1121         }
1122
1123         /* Headroom does not contribute to packet length */
1124         *len = page_off - VIRTIO_XDP_HEADROOM;
1125         return page;
1126 err_buf:
1127         __free_pages(page, 0);
1128         return NULL;
1129 }
1130
1131 static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
1132                                                unsigned int xdp_headroom,
1133                                                void *buf,
1134                                                unsigned int len)
1135 {
1136         unsigned int header_offset;
1137         unsigned int headroom;
1138         unsigned int buflen;
1139         struct sk_buff *skb;
1140
1141         header_offset = VIRTNET_RX_PAD + xdp_headroom;
1142         headroom = vi->hdr_len + header_offset;
1143         buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1144                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1145
1146         skb = virtnet_build_skb(buf, buflen, headroom, len);
1147         if (unlikely(!skb))
1148                 return NULL;
1149
1150         buf += header_offset;
1151         memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);
1152
1153         return skb;
1154 }
1155
1156 static struct sk_buff *receive_small_xdp(struct net_device *dev,
1157                                          struct virtnet_info *vi,
1158                                          struct receive_queue *rq,
1159                                          struct bpf_prog *xdp_prog,
1160                                          void *buf,
1161                                          unsigned int xdp_headroom,
1162                                          unsigned int len,
1163                                          unsigned int *xdp_xmit,
1164                                          struct virtnet_rq_stats *stats)
1165 {
1166         unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
1167         unsigned int headroom = vi->hdr_len + header_offset;
1168         struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
1169         struct page *page = virt_to_head_page(buf);
1170         struct page *xdp_page;
1171         unsigned int buflen;
1172         struct xdp_buff xdp;
1173         struct sk_buff *skb;
1174         unsigned int metasize = 0;
1175         u32 act;
1176
1177         if (unlikely(hdr->hdr.gso_type))
1178                 goto err_xdp;
1179
1180         buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1181                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1182
1183         if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
1184                 int offset = buf - page_address(page) + header_offset;
1185                 unsigned int tlen = len + vi->hdr_len;
1186                 int num_buf = 1;
1187
1188                 xdp_headroom = virtnet_get_headroom(vi);
1189                 header_offset = VIRTNET_RX_PAD + xdp_headroom;
1190                 headroom = vi->hdr_len + header_offset;
1191                 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1192                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1193                 xdp_page = xdp_linearize_page(rq, &num_buf, page,
1194                                               offset, header_offset,
1195                                               &tlen);
1196                 if (!xdp_page)
1197                         goto err_xdp;
1198
1199                 buf = page_address(xdp_page);
1200                 put_page(page);
1201                 page = xdp_page;
1202         }
1203
1204         xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
1205         xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
1206                          xdp_headroom, len, true);
1207
1208         act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1209
1210         switch (act) {
1211         case XDP_PASS:
1212                 /* Recalculate length in case bpf program changed it */
1213                 len = xdp.data_end - xdp.data;
1214                 metasize = xdp.data - xdp.data_meta;
1215                 break;
1216
1217         case XDP_TX:
1218         case XDP_REDIRECT:
1219                 goto xdp_xmit;
1220
1221         default:
1222                 goto err_xdp;
1223         }
1224
1225         skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
1226         if (unlikely(!skb))
1227                 goto err;
1228
1229         if (metasize)
1230                 skb_metadata_set(skb, metasize);
1231
1232         return skb;
1233
1234 err_xdp:
1235         stats->xdp_drops++;
1236 err:
1237         stats->drops++;
1238         put_page(page);
1239 xdp_xmit:
1240         return NULL;
1241 }
1242
1243 static struct sk_buff *receive_small(struct net_device *dev,
1244                                      struct virtnet_info *vi,
1245                                      struct receive_queue *rq,
1246                                      void *buf, void *ctx,
1247                                      unsigned int len,
1248                                      unsigned int *xdp_xmit,
1249                                      struct virtnet_rq_stats *stats)
1250 {
1251         unsigned int xdp_headroom = (unsigned long)ctx;
1252         struct page *page = virt_to_head_page(buf);
1253         struct sk_buff *skb;
1254
1255         len -= vi->hdr_len;
1256         stats->bytes += len;
1257
1258         if (unlikely(len > GOOD_PACKET_LEN)) {
1259                 pr_debug("%s: rx error: len %u exceeds max size %d\n",
1260                          dev->name, len, GOOD_PACKET_LEN);
1261                 dev->stats.rx_length_errors++;
1262                 goto err;
1263         }
1264
1265         if (unlikely(vi->xdp_enabled)) {
1266                 struct bpf_prog *xdp_prog;
1267
1268                 rcu_read_lock();
1269                 xdp_prog = rcu_dereference(rq->xdp_prog);
1270                 if (xdp_prog) {
1271                         skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
1272                                                 xdp_headroom, len, xdp_xmit,
1273                                                 stats);
1274                         rcu_read_unlock();
1275                         return skb;
1276                 }
1277                 rcu_read_unlock();
1278         }
1279
1280         skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
1281         if (likely(skb))
1282                 return skb;
1283
1284 err:
1285         stats->drops++;
1286         put_page(page);
1287         return NULL;
1288 }
1289
1290 static struct sk_buff *receive_big(struct net_device *dev,
1291                                    struct virtnet_info *vi,
1292                                    struct receive_queue *rq,
1293                                    void *buf,
1294                                    unsigned int len,
1295                                    struct virtnet_rq_stats *stats)
1296 {
1297         struct page *page = buf;
1298         struct sk_buff *skb =
1299                 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
1300
1301         stats->bytes += len - vi->hdr_len;
1302         if (unlikely(!skb))
1303                 goto err;
1304
1305         return skb;
1306
1307 err:
1308         stats->drops++;
1309         give_pages(rq, page);
1310         return NULL;
1311 }
1312
1313 static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
1314                                struct net_device *dev,
1315                                struct virtnet_rq_stats *stats)
1316 {
1317         struct page *page;
1318         void *buf;
1319         int len;
1320
1321         while (num_buf-- > 1) {
1322                 buf = virtnet_rq_get_buf(rq, &len, NULL);
1323                 if (unlikely(!buf)) {
1324                         pr_debug("%s: rx error: %d buffers missing\n",
1325                                  dev->name, num_buf);
1326                         dev->stats.rx_length_errors++;
1327                         break;
1328                 }
1329                 stats->bytes += len;
1330                 page = virt_to_head_page(buf);
1331                 put_page(page);
1332         }
1333 }
1334
1335 /* Why not use xdp_build_skb_from_frame() ?
1336  * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
1337  * virtio-net there are 2 points that do not match its requirements:
1338  *  1. The size of the prefilled buffer is not fixed before xdp is set.
1339  *  2. xdp_build_skb_from_frame() does more checks that we don't need,
1340  *     like eth_type_trans() (which virtio-net does in receive_buf()).
1341  */
1342 static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
1343                                                struct virtnet_info *vi,
1344                                                struct xdp_buff *xdp,
1345                                                unsigned int xdp_frags_truesz)
1346 {
1347         struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
1348         unsigned int headroom, data_len;
1349         struct sk_buff *skb;
1350         int metasize;
1351         u8 nr_frags;
1352
1353         if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
1354                 pr_debug("Error building skb as missing reserved tailroom for xdp");
1355                 return NULL;
1356         }
1357
1358         if (unlikely(xdp_buff_has_frags(xdp)))
1359                 nr_frags = sinfo->nr_frags;
1360
1361         skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
1362         if (unlikely(!skb))
1363                 return NULL;
1364
1365         headroom = xdp->data - xdp->data_hard_start;
1366         data_len = xdp->data_end - xdp->data;
1367         skb_reserve(skb, headroom);
1368         __skb_put(skb, data_len);
1369
1370         metasize = xdp->data - xdp->data_meta;
1371         metasize = metasize > 0 ? metasize : 0;
1372         if (metasize)
1373                 skb_metadata_set(skb, metasize);
1374
1375         if (unlikely(xdp_buff_has_frags(xdp)))
1376                 xdp_update_skb_shared_info(skb, nr_frags,
1377                                            sinfo->xdp_frags_size,
1378                                            xdp_frags_truesz,
1379                                            xdp_buff_is_frag_pfmemalloc(xdp));
1380
1381         return skb;
1382 }
1383
1384 /* TODO: build xdp in big mode */
1385 static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
1386                                       struct virtnet_info *vi,
1387                                       struct receive_queue *rq,
1388                                       struct xdp_buff *xdp,
1389                                       void *buf,
1390                                       unsigned int len,
1391                                       unsigned int frame_sz,
1392                                       int *num_buf,
1393                                       unsigned int *xdp_frags_truesize,
1394                                       struct virtnet_rq_stats *stats)
1395 {
1396         struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1397         unsigned int headroom, tailroom, room;
1398         unsigned int truesize, cur_frag_size;
1399         struct skb_shared_info *shinfo;
1400         unsigned int xdp_frags_truesz = 0;
1401         struct page *page;
1402         skb_frag_t *frag;
1403         int offset;
1404         void *ctx;
1405
1406         xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
1407         xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
1408                          VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
1409
1410         if (!*num_buf)
1411                 return 0;
1412
1413         if (*num_buf > 1) {
1414                 /* If we want to build a multi-buffer xdp buff, we need
1415                  * to set the XDP_FLAGS_HAS_FRAGS bit in the xdp_buff
1416                  * flags.
1417                  */
1418                 if (!xdp_buff_has_frags(xdp))
1419                         xdp_buff_set_frags_flag(xdp);
1420
1421                 shinfo = xdp_get_shared_info_from_buff(xdp);
1422                 shinfo->nr_frags = 0;
1423                 shinfo->xdp_frags_size = 0;
1424         }
1425
1426         if (*num_buf > MAX_SKB_FRAGS + 1)
1427                 return -EINVAL;
1428
1429         while (--*num_buf > 0) {
1430                 buf = virtnet_rq_get_buf(rq, &len, &ctx);
1431                 if (unlikely(!buf)) {
1432                         pr_debug("%s: rx error: %d buffers out of %d missing\n",
1433                                  dev->name, *num_buf,
1434                                  virtio16_to_cpu(vi->vdev, hdr->num_buffers));
1435                         dev->stats.rx_length_errors++;
1436                         goto err;
1437                 }
1438
1439                 stats->bytes += len;
1440                 page = virt_to_head_page(buf);
1441                 offset = buf - page_address(page);
1442
1443                 truesize = mergeable_ctx_to_truesize(ctx);
1444                 headroom = mergeable_ctx_to_headroom(ctx);
1445                 tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1446                 room = SKB_DATA_ALIGN(headroom + tailroom);
1447
1448                 cur_frag_size = truesize;
1449                 xdp_frags_truesz += cur_frag_size;
1450                 if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
1451                         put_page(page);
1452                         pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1453                                  dev->name, len, (unsigned long)(truesize - room));
1454                         dev->stats.rx_length_errors++;
1455                         goto err;
1456                 }
1457
1458                 frag = &shinfo->frags[shinfo->nr_frags++];
1459                 skb_frag_fill_page_desc(frag, page, offset, len);
1460                 if (page_is_pfmemalloc(page))
1461                         xdp_buff_set_frag_pfmemalloc(xdp);
1462
1463                 shinfo->xdp_frags_size += len;
1464         }
1465
1466         *xdp_frags_truesize = xdp_frags_truesz;
1467         return 0;
1468
1469 err:
1470         put_xdp_frags(xdp);
1471         return -EINVAL;
1472 }
1473
1474 static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
1475                                    struct receive_queue *rq,
1476                                    struct bpf_prog *xdp_prog,
1477                                    void *ctx,
1478                                    unsigned int *frame_sz,
1479                                    int *num_buf,
1480                                    struct page **page,
1481                                    int offset,
1482                                    unsigned int *len,
1483                                    struct virtio_net_hdr_mrg_rxbuf *hdr)
1484 {
1485         unsigned int truesize = mergeable_ctx_to_truesize(ctx);
1486         unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1487         struct page *xdp_page;
1488         unsigned int xdp_room;
1489
1490         /* Transient failure which in theory could occur if
1491          * in-flight packets from before XDP was enabled reach
1492          * the receive path after XDP is loaded.
1493          */
1494         if (unlikely(hdr->hdr.gso_type))
1495                 return NULL;
1496
1497         /* The XDP core assumes the frag size is PAGE_SIZE, but buffers
1498          * with headroom may add a hole in truesize, which
1499          * makes their length exceed PAGE_SIZE. So we disable the
1500          * hole mechanism for xdp. See add_recvbuf_mergeable().
1501          */
1502         *frame_sz = truesize;
1503
1504         if (likely(headroom >= virtnet_get_headroom(vi) &&
1505                    (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
1506                 return page_address(*page) + offset;
1507         }
1508
1509         /* This happens when headroom is not enough because
1510          * the buffer was prefilled before XDP was set.
1511          * This should only happen for the first several packets.
1512          * In fact, vq reset could be used here to help us clean up
1513          * the prefilled buffers, but many existing devices do not
1514          * support it, and we don't want to bother users who are
1515          * using xdp normally.
1516          */
1517         if (!xdp_prog->aux->xdp_has_frags) {
1518                 /* linearize data for XDP */
1519                 xdp_page = xdp_linearize_page(rq, num_buf,
1520                                               *page, offset,
1521                                               VIRTIO_XDP_HEADROOM,
1522                                               len);
1523                 if (!xdp_page)
1524                         return NULL;
1525         } else {
1526                 xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
1527                                           sizeof(struct skb_shared_info));
1528                 if (*len + xdp_room > PAGE_SIZE)
1529                         return NULL;
1530
1531                 xdp_page = alloc_page(GFP_ATOMIC);
1532                 if (!xdp_page)
1533                         return NULL;
1534
1535                 memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
1536                        page_address(*page) + offset, *len);
1537         }
1538
1539         *frame_sz = PAGE_SIZE;
1540
1541         put_page(*page);
1542
1543         *page = xdp_page;
1544
1545         return page_address(*page) + VIRTIO_XDP_HEADROOM;
1546 }
1547
1548 static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
1549                                              struct virtnet_info *vi,
1550                                              struct receive_queue *rq,
1551                                              struct bpf_prog *xdp_prog,
1552                                              void *buf,
1553                                              void *ctx,
1554                                              unsigned int len,
1555                                              unsigned int *xdp_xmit,
1556                                              struct virtnet_rq_stats *stats)
1557 {
1558         struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1559         int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1560         struct page *page = virt_to_head_page(buf);
1561         int offset = buf - page_address(page);
1562         unsigned int xdp_frags_truesz = 0;
1563         struct sk_buff *head_skb;
1564         unsigned int frame_sz;
1565         struct xdp_buff xdp;
1566         void *data;
1567         u32 act;
1568         int err;
1569
1570         data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
1571                                      offset, &len, hdr);
1572         if (unlikely(!data))
1573                 goto err_xdp;
1574
1575         err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
1576                                          &num_buf, &xdp_frags_truesz, stats);
1577         if (unlikely(err))
1578                 goto err_xdp;
1579
1580         act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1581
1582         switch (act) {
1583         case XDP_PASS:
1584                 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
1585                 if (unlikely(!head_skb))
1586                         break;
1587                 return head_skb;
1588
1589         case XDP_TX:
1590         case XDP_REDIRECT:
1591                 return NULL;
1592
1593         default:
1594                 break;
1595         }
1596
1597         put_xdp_frags(&xdp);
1598
1599 err_xdp:
1600         put_page(page);
1601         mergeable_buf_free(rq, num_buf, dev, stats);
1602
1603         stats->xdp_drops++;
1604         stats->drops++;
1605         return NULL;
1606 }
1607
1608 static struct sk_buff *receive_mergeable(struct net_device *dev,
1609                                          struct virtnet_info *vi,
1610                                          struct receive_queue *rq,
1611                                          void *buf,
1612                                          void *ctx,
1613                                          unsigned int len,
1614                                          unsigned int *xdp_xmit,
1615                                          struct virtnet_rq_stats *stats)
1616 {
1617         struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1618         int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1619         struct page *page = virt_to_head_page(buf);
1620         int offset = buf - page_address(page);
1621         struct sk_buff *head_skb, *curr_skb;
1622         unsigned int truesize = mergeable_ctx_to_truesize(ctx);
1623         unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1624         unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1625         unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1626
1627         head_skb = NULL;
1628         stats->bytes += len - vi->hdr_len;
1629
1630         if (unlikely(len > truesize - room)) {
1631                 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1632                          dev->name, len, (unsigned long)(truesize - room));
1633                 dev->stats.rx_length_errors++;
1634                 goto err_skb;
1635         }
1636
1637         if (unlikely(vi->xdp_enabled)) {
1638                 struct bpf_prog *xdp_prog;
1639
1640                 rcu_read_lock();
1641                 xdp_prog = rcu_dereference(rq->xdp_prog);
1642                 if (xdp_prog) {
1643                         head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
1644                                                          len, xdp_xmit, stats);
1645                         rcu_read_unlock();
1646                         return head_skb;
1647                 }
1648                 rcu_read_unlock();
1649         }
1650
1651         head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
1652         curr_skb = head_skb;
1653
1654         if (unlikely(!curr_skb))
1655                 goto err_skb;
1656         while (--num_buf) {
1657                 int num_skb_frags;
1658
1659                 buf = virtnet_rq_get_buf(rq, &len, &ctx);
1660                 if (unlikely(!buf)) {
1661                         pr_debug("%s: rx error: %d buffers out of %d missing\n",
1662                                  dev->name, num_buf,
1663                                  virtio16_to_cpu(vi->vdev,
1664                                                  hdr->num_buffers));
1665                         dev->stats.rx_length_errors++;
1666                         goto err_buf;
1667                 }
1668
1669                 stats->bytes += len;
1670                 page = virt_to_head_page(buf);
1671
1672                 truesize = mergeable_ctx_to_truesize(ctx);
1673                 headroom = mergeable_ctx_to_headroom(ctx);
1674                 tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1675                 room = SKB_DATA_ALIGN(headroom + tailroom);
1676                 if (unlikely(len > truesize - room)) {
1677                         pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1678                                  dev->name, len, (unsigned long)(truesize - room));
1679                         dev->stats.rx_length_errors++;
1680                         goto err_skb;
1681                 }
1682
1683                 num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
1684                 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
1685                         struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
1686
1687                         if (unlikely(!nskb))
1688                                 goto err_skb;
1689                         if (curr_skb == head_skb)
1690                                 skb_shinfo(curr_skb)->frag_list = nskb;
1691                         else
1692                                 curr_skb->next = nskb;
1693                         curr_skb = nskb;
1694                         head_skb->truesize += nskb->truesize;
1695                         num_skb_frags = 0;
1696                 }
1697                 if (curr_skb != head_skb) {
1698                         head_skb->data_len += len;
1699                         head_skb->len += len;
1700                         head_skb->truesize += truesize;
1701                 }
1702                 offset = buf - page_address(page);
1703                 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
1704                         put_page(page);
1705                         skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
1706                                              len, truesize);
1707                 } else {
1708                         skb_add_rx_frag(curr_skb, num_skb_frags, page,
1709                                         offset, len, truesize);
1710                 }
1711         }
1712
1713         ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
1714         return head_skb;
1715
1716 err_skb:
1717         put_page(page);
1718         mergeable_buf_free(rq, num_buf, dev, stats);
1719
1720 err_buf:
1721         stats->drops++;
1722         dev_kfree_skb(head_skb);
1723         return NULL;
1724 }
1725
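/* Translate the hash_report from the virtio header into a kernel packet
 * hash type (L4, L3 or none) and record the 32-bit hash value on the skb.
 */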
1726 static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
1727                                 struct sk_buff *skb)
1728 {
1729         enum pkt_hash_types rss_hash_type;
1730
1731         if (!hdr_hash || !skb)
1732                 return;
1733
1734         switch (__le16_to_cpu(hdr_hash->hash_report)) {
1735         case VIRTIO_NET_HASH_REPORT_TCPv4:
1736         case VIRTIO_NET_HASH_REPORT_UDPv4:
1737         case VIRTIO_NET_HASH_REPORT_TCPv6:
1738         case VIRTIO_NET_HASH_REPORT_UDPv6:
1739         case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
1740         case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
1741                 rss_hash_type = PKT_HASH_TYPE_L4;
1742                 break;
1743         case VIRTIO_NET_HASH_REPORT_IPv4:
1744         case VIRTIO_NET_HASH_REPORT_IPv6:
1745         case VIRTIO_NET_HASH_REPORT_IPv6_EX:
1746                 rss_hash_type = PKT_HASH_TYPE_L3;
1747                 break;
1748         case VIRTIO_NET_HASH_REPORT_NONE:
1749         default:
1750                 rss_hash_type = PKT_HASH_TYPE_NONE;
1751         }
1752         skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
1753 }
1754
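/* Entry point for a completed receive buffer: dispatch to the mergeable,
 * big or small receive path, then fill in the hash, checksum and GSO
 * metadata from the virtio header and hand the skb to GRO.
 */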
1755 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
1756                         void *buf, unsigned int len, void **ctx,
1757                         unsigned int *xdp_xmit,
1758                         struct virtnet_rq_stats *stats)
1759 {
1760         struct net_device *dev = vi->dev;
1761         struct sk_buff *skb;
1762         struct virtio_net_common_hdr *hdr;
1763
1764         if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
1765                 pr_debug("%s: short packet %i\n", dev->name, len);
1766                 dev->stats.rx_length_errors++;
1767                 virtnet_rq_free_unused_buf(rq->vq, buf);
1768                 return;
1769         }
1770
1771         if (vi->mergeable_rx_bufs)
1772                 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
1773                                         stats);
1774         else if (vi->big_packets)
1775                 skb = receive_big(dev, vi, rq, buf, len, stats);
1776         else
1777                 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
1778
1779         if (unlikely(!skb))
1780                 return;
1781
1782         hdr = skb_vnet_common_hdr(skb);
1783         if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
1784                 virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
1785
1786         if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
1787                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1788
1789         if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
1790                                   virtio_is_little_endian(vi->vdev))) {
1791                 net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
1792                                      dev->name, hdr->hdr.gso_type,
1793                                      hdr->hdr.gso_size);
1794                 goto frame_err;
1795         }
1796
1797         skb_record_rx_queue(skb, vq2rxq(rq->vq));
1798         skb->protocol = eth_type_trans(skb, dev);
1799         pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
1800                  ntohs(skb->protocol), skb->len, skb->pkt_type);
1801
1802         napi_gro_receive(&rq->napi, skb);
1803         return;
1804
1805 frame_err:
1806         dev->stats.rx_frame_errors++;
1807         dev_kfree_skb(skb);
1808 }
1809
1810 /* Unlike mergeable buffers, all buffers are allocated with the
1811  * same size, except for the headroom. For this reason we do
1812  * not need to use mergeable_len_to_ctx here - it is enough
1813  * to store the headroom as the context, ignoring the truesize.
1814  */
1815 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
1816                              gfp_t gfp)
1817 {
1818         char *buf;
1819         unsigned int xdp_headroom = virtnet_get_headroom(vi);
1820         void *ctx = (void *)(unsigned long)xdp_headroom;
1821         int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
1822         int err;
1823
1824         len = SKB_DATA_ALIGN(len) +
1825               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1826
1827         buf = virtnet_rq_alloc(rq, len, gfp);
1828         if (unlikely(!buf))
1829                 return -ENOMEM;
1830
1831         virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
1832                                vi->hdr_len + GOOD_PACKET_LEN);
1833
1834         err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1835         if (err < 0) {
1836                 if (rq->do_dma)
1837                         virtnet_rq_unmap(rq, buf, 0);
1838                 put_page(virt_to_head_page(buf));
1839         }
1840
1841         return err;
1842 }
1843
1844 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
1845                            gfp_t gfp)
1846 {
1847         struct page *first, *list = NULL;
1848         char *p;
1849         int i, err, offset;
1850
1851         sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
1852
1853         /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
1854         for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
1855                 first = get_a_page(rq, gfp);
1856                 if (!first) {
1857                         if (list)
1858                                 give_pages(rq, list);
1859                         return -ENOMEM;
1860                 }
1861                 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
1862
1863                 /* chain new page in list head to match sg */
1864                 first->private = (unsigned long)list;
1865                 list = first;
1866         }
1867
1868         first = get_a_page(rq, gfp);
1869         if (!first) {
1870                 give_pages(rq, list);
1871                 return -ENOMEM;
1872         }
1873         p = page_address(first);
1874
1875         /* rq->sg[0], rq->sg[1] share the same page */
1876         /* a separate rq->sg[0] for the header - required in case !any_header_sg */
1877         sg_set_buf(&rq->sg[0], p, vi->hdr_len);
1878
1879         /* rq->sg[1] for data packet, from offset */
1880         offset = sizeof(struct padded_vnet_hdr);
1881         sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
1882
1883         /* chain first in list head */
1884         first->private = (unsigned long)list;
1885         err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
1886                                   first, gfp);
1887         if (err < 0)
1888                 give_pages(rq, first);
1889
1890         return err;
1891 }
1892
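/* Size the next mergeable receive buffer: with XDP head/tailroom in use,
 * always hand out PAGE_SIZE minus that room; otherwise use hdr_len plus the
 * EWMA of recent packet lengths (clamped to [min_buf_len, PAGE_SIZE - hdr_len]),
 * rounded up to the cache line size.
 */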
1893 static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
1894                                           struct ewma_pkt_len *avg_pkt_len,
1895                                           unsigned int room)
1896 {
1897         struct virtnet_info *vi = rq->vq->vdev->priv;
1898         const size_t hdr_len = vi->hdr_len;
1899         unsigned int len;
1900
1901         if (room)
1902                 return PAGE_SIZE - room;
1903
1904         len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
1905                                 rq->min_buf_len, PAGE_SIZE - hdr_len);
1906
1907         return ALIGN(len, L1_CACHE_BYTES);
1908 }
1909
1910 static int add_recvbuf_mergeable(struct virtnet_info *vi,
1911                                  struct receive_queue *rq, gfp_t gfp)
1912 {
1913         struct page_frag *alloc_frag = &rq->alloc_frag;
1914         unsigned int headroom = virtnet_get_headroom(vi);
1915         unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1916         unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1917         unsigned int len, hole;
1918         void *ctx;
1919         char *buf;
1920         int err;
1921
1922         /* Extra tailroom is needed to satisfy XDP's assumption. This
1923          * means rx frag coalescing won't work, but considering we've
1924          * disabled GSO for XDP, it won't be a big issue.
1925          */
1926         len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
1927
1928         buf = virtnet_rq_alloc(rq, len + room, gfp);
1929         if (unlikely(!buf))
1930                 return -ENOMEM;
1931
1932         buf += headroom; /* advance address leaving hole at front of pkt */
1933         hole = alloc_frag->size - alloc_frag->offset;
1934         if (hole < len + room) {
1935                 /* To avoid internal fragmentation, if there is most likely not
1936                  * enough space for another buffer, add the remaining space to
1937                  * the current buffer.
1938                  * The XDP core assumes the frame_size of the xdp_buff and the
1939                  * length of the frag are PAGE_SIZE, so we disable the hole mechanism.
1940                  */
1941                 if (!headroom)
1942                         len += hole;
1943                 alloc_frag->offset += hole;
1944         }
1945
1946         virtnet_rq_init_one_sg(rq, buf, len);
1947
1948         ctx = mergeable_len_to_ctx(len + room, headroom);
1949         err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1950         if (err < 0) {
1951                 if (rq->do_dma)
1952                         virtnet_rq_unmap(rq, buf, 0);
1953                 put_page(virt_to_head_page(buf));
1954         }
1955
1956         return err;
1957 }
1958
1959 /*
1960  * Returns false if we couldn't fill entirely (OOM).
1961  *
1962  * Normally run in the receive path, but can also be run from ndo_open
1963  * before we're receiving packets, or from refill_work which is
1964  * careful to disable receiving (using napi_disable).
1965  */
1966 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
1967                           gfp_t gfp)
1968 {
1969         int err;
1970         bool oom;
1971
1972         do {
1973                 if (vi->mergeable_rx_bufs)
1974                         err = add_recvbuf_mergeable(vi, rq, gfp);
1975                 else if (vi->big_packets)
1976                         err = add_recvbuf_big(vi, rq, gfp);
1977                 else
1978                         err = add_recvbuf_small(vi, rq, gfp);
1979
1980                 oom = err == -ENOMEM;
1981                 if (err)
1982                         break;
1983         } while (rq->vq->num_free);
1984         if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
1985                 unsigned long flags;
1986
1987                 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
1988                 rq->stats.kicks++;
1989                 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
1990         }
1991
1992         return !oom;
1993 }
1994
1995 static void skb_recv_done(struct virtqueue *rvq)
1996 {
1997         struct virtnet_info *vi = rvq->vdev->priv;
1998         struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
1999
2000         virtqueue_napi_schedule(&rq->napi, rvq);
2001 }
2002
2003 static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
2004 {
2005         napi_enable(napi);
2006
2007         /* If all buffers were filled by the other side before we enabled NAPI,
2008          * we won't get another interrupt, so process any outstanding packets
2009          * now. Calling local_bh_enable afterwards triggers softIRQ processing.
2010          */
2011         local_bh_disable();
2012         virtqueue_napi_schedule(napi, vq);
2013         local_bh_enable();
2014 }
2015
2016 static void virtnet_napi_tx_enable(struct virtnet_info *vi,
2017                                    struct virtqueue *vq,
2018                                    struct napi_struct *napi)
2019 {
2020         if (!napi->weight)
2021                 return;
2022
2023         /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
2024          * enable the feature if this is likely affine with the transmit path.
2025          */
2026         if (!vi->affinity_hint_set) {
2027                 napi->weight = 0;
2028                 return;
2029         }
2030
2031         return virtnet_napi_enable(vq, napi);
2032 }
2033
2034 static void virtnet_napi_tx_disable(struct napi_struct *napi)
2035 {
2036         if (napi->weight)
2037                 napi_disable(napi);
2038 }
2039
2040 static void refill_work(struct work_struct *work)
2041 {
2042         struct virtnet_info *vi =
2043                 container_of(work, struct virtnet_info, refill.work);
2044         bool still_empty;
2045         int i;
2046
2047         for (i = 0; i < vi->curr_queue_pairs; i++) {
2048                 struct receive_queue *rq = &vi->rq[i];
2049
2050                 napi_disable(&rq->napi);
2051                 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
2052                 virtnet_napi_enable(rq->vq, &rq->napi);
2053
2054                 /* In theory, this can happen: if we don't get any buffers in,
2055                  * we will *never* try to fill again.
2056                  */
2057                 if (still_empty)
2058                         schedule_delayed_work(&vi->refill, HZ/2);
2059         }
2060 }
2061
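/* Pull up to @budget completed buffers off the receive virtqueue, hand them
 * to receive_buf(), top the ring back up when it runs low and fold the
 * on-stack counters into the per-queue stats.
 */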
2062 static int virtnet_receive(struct receive_queue *rq, int budget,
2063                            unsigned int *xdp_xmit)
2064 {
2065         struct virtnet_info *vi = rq->vq->vdev->priv;
2066         struct virtnet_rq_stats stats = {};
2067         unsigned int len;
2068         void *buf;
2069         int i;
2070
2071         if (!vi->big_packets || vi->mergeable_rx_bufs) {
2072                 void *ctx;
2073
2074                 while (stats.packets < budget &&
2075                        (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
2076                         receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
2077                         stats.packets++;
2078                 }
2079         } else {
2080                 while (stats.packets < budget &&
2081                        (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
2082                         receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
2083                         stats.packets++;
2084                 }
2085         }
2086
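        /* Refill when more than half of the ring (capped at the NAPI budget)
         * is unused; if the atomic refill fails, defer to the refill worker.
         */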
2087         if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
2088                 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
2089                         spin_lock(&vi->refill_lock);
2090                         if (vi->refill_enabled)
2091                                 schedule_delayed_work(&vi->refill, 0);
2092                         spin_unlock(&vi->refill_lock);
2093                 }
2094         }
2095
2096         u64_stats_update_begin(&rq->stats.syncp);
2097         for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
2098                 size_t offset = virtnet_rq_stats_desc[i].offset;
2099                 u64 *item;
2100
2101                 item = (u64 *)((u8 *)&rq->stats + offset);
2102                 *item += *(u64 *)((u8 *)&stats + offset);
2103         }
2104         u64_stats_update_end(&rq->stats.syncp);
2105
2106         return stats.packets;
2107 }
2108
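/* Called from the RX NAPI handler: when TX NAPI is not in use (and the queue
 * is neither an XDP queue nor being reset), opportunistically reclaim
 * completed TX buffers for the sibling send queue and wake it if enough
 * descriptors freed up.
 */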
2109 static void virtnet_poll_cleantx(struct receive_queue *rq)
2110 {
2111         struct virtnet_info *vi = rq->vq->vdev->priv;
2112         unsigned int index = vq2rxq(rq->vq);
2113         struct send_queue *sq = &vi->sq[index];
2114         struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
2115
2116         if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
2117                 return;
2118
2119         if (__netif_tx_trylock(txq)) {
2120                 if (sq->reset) {
2121                         __netif_tx_unlock(txq);
2122                         return;
2123                 }
2124
2125                 do {
2126                         virtqueue_disable_cb(sq->vq);
2127                         free_old_xmit_skbs(sq, true);
2128                 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2129
2130                 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
2131                         netif_tx_wake_queue(txq);
2132
2133                 __netif_tx_unlock(txq);
2134         }
2135 }
2136
2137 static int virtnet_poll(struct napi_struct *napi, int budget)
2138 {
2139         struct receive_queue *rq =
2140                 container_of(napi, struct receive_queue, napi);
2141         struct virtnet_info *vi = rq->vq->vdev->priv;
2142         struct send_queue *sq;
2143         unsigned int received;
2144         unsigned int xdp_xmit = 0;
2145
2146         virtnet_poll_cleantx(rq);
2147
2148         received = virtnet_receive(rq, budget, &xdp_xmit);
2149
2150         if (xdp_xmit & VIRTIO_XDP_REDIR)
2151                 xdp_do_flush();
2152
2153         /* Out of packets? */
2154         if (received < budget)
2155                 virtqueue_napi_complete(napi, rq->vq, received);
2156
2157         if (xdp_xmit & VIRTIO_XDP_TX) {
2158                 sq = virtnet_xdp_get_sq(vi);
2159                 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2160                         u64_stats_update_begin(&sq->stats.syncp);
2161                         sq->stats.kicks++;
2162                         u64_stats_update_end(&sq->stats.syncp);
2163                 }
2164                 virtnet_xdp_put_sq(vi, sq);
2165         }
2166
2167         return received;
2168 }
2169
2170 static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
2171 {
2172         virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
2173         napi_disable(&vi->rq[qp_index].napi);
2174         xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2175 }
2176
2177 static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
2178 {
2179         struct net_device *dev = vi->dev;
2180         int err;
2181
2182         err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
2183                                vi->rq[qp_index].napi.napi_id);
2184         if (err < 0)
2185                 return err;
2186
2187         err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
2188                                          MEM_TYPE_PAGE_SHARED, NULL);
2189         if (err < 0)
2190                 goto err_xdp_reg_mem_model;
2191
2192         virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
2193         virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
2194
2195         return 0;
2196
2197 err_xdp_reg_mem_model:
2198         xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2199         return err;
2200 }
2201
2202 static int virtnet_open(struct net_device *dev)
2203 {
2204         struct virtnet_info *vi = netdev_priv(dev);
2205         int i, err;
2206
2207         enable_delayed_refill(vi);
2208
2209         for (i = 0; i < vi->max_queue_pairs; i++) {
2210                 if (i < vi->curr_queue_pairs)
2211                         /* Make sure we have some buffers: if oom use wq. */
2212                         if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2213                                 schedule_delayed_work(&vi->refill, 0);
2214
2215                 err = virtnet_enable_queue_pair(vi, i);
2216                 if (err < 0)
2217                         goto err_enable_qp;
2218         }
2219
2220         return 0;
2221
2222 err_enable_qp:
2223         disable_delayed_refill(vi);
2224         cancel_delayed_work_sync(&vi->refill);
2225
2226         for (i--; i >= 0; i--)
2227                 virtnet_disable_queue_pair(vi, i);
2228         return err;
2229 }
2230
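/* TX NAPI handler: reclaim completed skbs under the txq lock, wake the queue
 * once 2 + MAX_SKB_FRAGS descriptors are free, then re-enable callbacks and
 * reschedule if more completions raced in meanwhile.
 */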
2231 static int virtnet_poll_tx(struct napi_struct *napi, int budget)
2232 {
2233         struct send_queue *sq = container_of(napi, struct send_queue, napi);
2234         struct virtnet_info *vi = sq->vq->vdev->priv;
2235         unsigned int index = vq2txq(sq->vq);
2236         struct netdev_queue *txq;
2237         int opaque;
2238         bool done;
2239
2240         if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
2241                 /* We don't need to enable cb for XDP */
2242                 napi_complete_done(napi, 0);
2243                 return 0;
2244         }
2245
2246         txq = netdev_get_tx_queue(vi->dev, index);
2247         __netif_tx_lock(txq, raw_smp_processor_id());
2248         virtqueue_disable_cb(sq->vq);
2249         free_old_xmit_skbs(sq, true);
2250
2251         if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
2252                 netif_tx_wake_queue(txq);
2253
2254         opaque = virtqueue_enable_cb_prepare(sq->vq);
2255
2256         done = napi_complete_done(napi, 0);
2257
2258         if (!done)
2259                 virtqueue_disable_cb(sq->vq);
2260
2261         __netif_tx_unlock(txq);
2262
2263         if (done) {
2264                 if (unlikely(virtqueue_poll(sq->vq, opaque))) {
2265                         if (napi_schedule_prep(napi)) {
2266                                 __netif_tx_lock(txq, raw_smp_processor_id());
2267                                 virtqueue_disable_cb(sq->vq);
2268                                 __netif_tx_unlock(txq);
2269                                 __napi_schedule(napi);
2270                         }
2271                 }
2272         }
2273
2274         return 0;
2275 }
2276
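/* Build the virtio-net header for @skb and post it to the send virtqueue.
 * If the device accepts an arbitrary header layout and the skb has enough
 * aligned, unshared headroom, the header is pushed in front of the data so
 * the whole frame fits in the skb's own scatterlist; otherwise the header
 * goes into a separate sg entry.
 */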
2277 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
2278 {
2279         struct virtio_net_hdr_mrg_rxbuf *hdr;
2280         const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
2281         struct virtnet_info *vi = sq->vq->vdev->priv;
2282         int num_sg;
2283         unsigned hdr_len = vi->hdr_len;
2284         bool can_push;
2285
2286         pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
2287
2288         can_push = vi->any_header_sg &&
2289                 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
2290                 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
2291         /* Even if we can, don't push here yet as this would skew
2292          * csum_start offset below. */
2293         if (can_push)
2294                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
2295         else
2296                 hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;
2297
2298         if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
2299                                     virtio_is_little_endian(vi->vdev), false,
2300                                     0))
2301                 return -EPROTO;
2302
2303         if (vi->mergeable_rx_bufs)
2304                 hdr->num_buffers = 0;
2305
2306         sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
2307         if (can_push) {
2308                 __skb_push(skb, hdr_len);
2309                 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
2310                 if (unlikely(num_sg < 0))
2311                         return num_sg;
2312                 /* Pull header back to avoid skew in tx bytes calculations. */
2313                 __skb_pull(skb, hdr_len);
2314         } else {
2315                 sg_set_buf(sq->sg, hdr, hdr_len);
2316                 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
2317                 if (unlikely(num_sg < 0))
2318                         return num_sg;
2319                 num_sg++;
2320         }
2321         return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
2322 }
2323
2324 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
2325 {
2326         struct virtnet_info *vi = netdev_priv(dev);
2327         int qnum = skb_get_queue_mapping(skb);
2328         struct send_queue *sq = &vi->sq[qnum];
2329         int err;
2330         struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
2331         bool kick = !netdev_xmit_more();
2332         bool use_napi = sq->napi.weight;
2333
2334         /* Free up any pending old buffers before queueing new ones. */
2335         do {
2336                 if (use_napi)
2337                         virtqueue_disable_cb(sq->vq);
2338
2339                 free_old_xmit_skbs(sq, false);
2340
2341         } while (use_napi && kick &&
2342                unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2343
2344         /* timestamp packet in software */
2345         skb_tx_timestamp(skb);
2346
2347         /* Try to transmit */
2348         err = xmit_skb(sq, skb);
2349
2350         /* This should not happen! */
2351         if (unlikely(err)) {
2352                 dev->stats.tx_fifo_errors++;
2353                 if (net_ratelimit())
2354                         dev_warn(&dev->dev,
2355                                  "Unexpected TXQ (%d) queue failure: %d\n",
2356                                  qnum, err);
2357                 dev->stats.tx_dropped++;
2358                 dev_kfree_skb_any(skb);
2359                 return NETDEV_TX_OK;
2360         }
2361
2362         /* Don't wait up for transmitted skbs to be freed. */
2363         if (!use_napi) {
2364                 skb_orphan(skb);
2365                 nf_reset_ct(skb);
2366         }
2367
2368         check_sq_full_and_disable(vi, dev, sq);
2369
2370         if (kick || netif_xmit_stopped(txq)) {
2371                 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2372                         u64_stats_update_begin(&sq->stats.syncp);
2373                         sq->stats.kicks++;
2374                         u64_stats_update_end(&sq->stats.syncp);
2375                 }
2376         }
2377
2378         return NETDEV_TX_OK;
2379 }
2380
2381 static int virtnet_rx_resize(struct virtnet_info *vi,
2382                              struct receive_queue *rq, u32 ring_num)
2383 {
2384         bool running = netif_running(vi->dev);
2385         int err, qindex;
2386
2387         qindex = rq - vi->rq;
2388
2389         if (running)
2390                 napi_disable(&rq->napi);
2391
2392         err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
2393         if (err)
2394                 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
2395
2396         if (!try_fill_recv(vi, rq, GFP_KERNEL))
2397                 schedule_delayed_work(&vi->refill, 0);
2398
2399         if (running)
2400                 virtnet_napi_enable(rq->vq, &rq->napi);
2401         return err;
2402 }
2403
2404 static int virtnet_tx_resize(struct virtnet_info *vi,
2405                              struct send_queue *sq, u32 ring_num)
2406 {
2407         bool running = netif_running(vi->dev);
2408         struct netdev_queue *txq;
2409         int err, qindex;
2410
2411         qindex = sq - vi->sq;
2412
2413         if (running)
2414                 virtnet_napi_tx_disable(&sq->napi);
2415
2416         txq = netdev_get_tx_queue(vi->dev, qindex);
2417
2418         /* 1. wait for all in-flight xmit to complete
2419          * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
2420          */
2421         __netif_tx_lock_bh(txq);
2422
2423         /* Prevent rx poll from accessing sq. */
2424         sq->reset = true;
2425
2426         /* Prevent the upper layer from trying to send packets. */
2427         netif_stop_subqueue(vi->dev, qindex);
2428
2429         __netif_tx_unlock_bh(txq);
2430
2431         err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
2432         if (err)
2433                 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
2434
2435         __netif_tx_lock_bh(txq);
2436         sq->reset = false;
2437         netif_tx_wake_queue(txq);
2438         __netif_tx_unlock_bh(txq);
2439
2440         if (running)
2441                 virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
2442         return err;
2443 }
2444
2445 /*
2446  * Send command via the control virtqueue and check status.  Commands
2447  * supported by the hypervisor, as indicated by feature bits, should
2448  * never fail unless improperly formatted.
2449  */
2450 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
2451                                  struct scatterlist *out)
2452 {
2453         struct scatterlist *sgs[4], hdr, stat;
2454         unsigned out_num = 0, tmp;
2455         int ret;
2456
2457         /* Caller should know better */
2458         BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
2459
2460         vi->ctrl->status = ~0;
2461         vi->ctrl->hdr.class = class;
2462         vi->ctrl->hdr.cmd = cmd;
2463         /* Add header */
2464         sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
2465         sgs[out_num++] = &hdr;
2466
2467         if (out)
2468                 sgs[out_num++] = out;
2469
2470         /* Add return status. */
2471         sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
2472         sgs[out_num] = &stat;
2473
2474         BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
2475         ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
2476         if (ret < 0) {
2477                 dev_warn(&vi->vdev->dev,
2478                          "Failed to add sgs for command vq: %d\n", ret);
2479                 return false;
2480         }
2481
2482         if (unlikely(!virtqueue_kick(vi->cvq)))
2483                 return vi->ctrl->status == VIRTIO_NET_OK;
2484
2485         /* Spin for a response; the kick causes an ioport write, trapping
2486          * into the hypervisor, so the request should be handled immediately.
2487          */
2488         while (!virtqueue_get_buf(vi->cvq, &tmp) &&
2489                !virtqueue_is_broken(vi->cvq))
2490                 cpu_relax();
2491
2492         return vi->ctrl->status == VIRTIO_NET_OK;
2493 }
2494
2495 static int virtnet_set_mac_address(struct net_device *dev, void *p)
2496 {
2497         struct virtnet_info *vi = netdev_priv(dev);
2498         struct virtio_device *vdev = vi->vdev;
2499         int ret;
2500         struct sockaddr *addr;
2501         struct scatterlist sg;
2502
2503         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
2504                 return -EOPNOTSUPP;
2505
2506         addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
2507         if (!addr)
2508                 return -ENOMEM;
2509
2510         ret = eth_prepare_mac_addr_change(dev, addr);
2511         if (ret)
2512                 goto out;
2513
2514         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
2515                 sg_init_one(&sg, addr->sa_data, dev->addr_len);
2516                 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2517                                           VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
2518                         dev_warn(&vdev->dev,
2519                                  "Failed to set mac address by vq command.\n");
2520                         ret = -EINVAL;
2521                         goto out;
2522                 }
2523         } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
2524                    !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2525                 unsigned int i;
2526
2527                 /* Naturally, this has an atomicity problem. */
2528                 for (i = 0; i < dev->addr_len; i++)
2529                         virtio_cwrite8(vdev,
2530                                        offsetof(struct virtio_net_config, mac) +
2531                                        i, addr->sa_data[i]);
2532         }
2533
2534         eth_commit_mac_addr_change(dev, p);
2535         ret = 0;
2536
2537 out:
2538         kfree(addr);
2539         return ret;
2540 }
2541
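/* Aggregate the per-queue 64-bit counters into @tot, re-reading each queue
 * under its u64_stats sequence retry loop, and copy over the error counters
 * that are only maintained in dev->stats.
 */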
2542 static void virtnet_stats(struct net_device *dev,
2543                           struct rtnl_link_stats64 *tot)
2544 {
2545         struct virtnet_info *vi = netdev_priv(dev);
2546         unsigned int start;
2547         int i;
2548
2549         for (i = 0; i < vi->max_queue_pairs; i++) {
2550                 u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
2551                 struct receive_queue *rq = &vi->rq[i];
2552                 struct send_queue *sq = &vi->sq[i];
2553
2554                 do {
2555                         start = u64_stats_fetch_begin(&sq->stats.syncp);
2556                         tpackets = sq->stats.packets;
2557                         tbytes   = sq->stats.bytes;
2558                         terrors  = sq->stats.tx_timeouts;
2559                 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
2560
2561                 do {
2562                         start = u64_stats_fetch_begin(&rq->stats.syncp);
2563                         rpackets = rq->stats.packets;
2564                         rbytes   = rq->stats.bytes;
2565                         rdrops   = rq->stats.drops;
2566                 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
2567
2568                 tot->rx_packets += rpackets;
2569                 tot->tx_packets += tpackets;
2570                 tot->rx_bytes   += rbytes;
2571                 tot->tx_bytes   += tbytes;
2572                 tot->rx_dropped += rdrops;
2573                 tot->tx_errors  += terrors;
2574         }
2575
2576         tot->tx_dropped = dev->stats.tx_dropped;
2577         tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
2578         tot->rx_length_errors = dev->stats.rx_length_errors;
2579         tot->rx_frame_errors = dev->stats.rx_frame_errors;
2580 }
2581
2582 static void virtnet_ack_link_announce(struct virtnet_info *vi)
2583 {
2584         rtnl_lock();
2585         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
2586                                   VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
2587                 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
2588         rtnl_unlock();
2589 }
2590
2591 static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2592 {
2593         struct scatterlist sg;
2594         struct net_device *dev = vi->dev;
2595
2596         if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
2597                 return 0;
2598
2599         vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
2600         sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
2601
2602         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
2603                                   VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
2604                 dev_warn(&dev->dev, "Failed to set num of queue pairs to %d\n",
2605                          queue_pairs);
2606                 return -EINVAL;
2607         } else {
2608                 vi->curr_queue_pairs = queue_pairs;
2609                 /* virtnet_open() will refill when the device is brought up. */
2610                 if (dev->flags & IFF_UP)
2611                         schedule_delayed_work(&vi->refill, 0);
2612         }
2613
2614         return 0;
2615 }
2616
2617 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2618 {
2619         int err;
2620
2621         rtnl_lock();
2622         err = _virtnet_set_queues(vi, queue_pairs);
2623         rtnl_unlock();
2624         return err;
2625 }
2626
2627 static int virtnet_close(struct net_device *dev)
2628 {
2629         struct virtnet_info *vi = netdev_priv(dev);
2630         int i;
2631
2632         /* Make sure NAPI doesn't schedule refill work */
2633         disable_delayed_refill(vi);
2634         /* Make sure refill_work doesn't re-enable napi! */
2635         cancel_delayed_work_sync(&vi->refill);
2636
2637         for (i = 0; i < vi->max_queue_pairs; i++)
2638                 virtnet_disable_queue_pair(vi, i);
2639
2640         return 0;
2641 }
2642
2643 static void virtnet_set_rx_mode(struct net_device *dev)
2644 {
2645         struct virtnet_info *vi = netdev_priv(dev);
2646         struct scatterlist sg[2];
2647         struct virtio_net_ctrl_mac *mac_data;
2648         struct netdev_hw_addr *ha;
2649         int uc_count;
2650         int mc_count;
2651         void *buf;
2652         int i;
2653
2654         /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
2655         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
2656                 return;
2657
2658         vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
2659         vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
2660
2661         sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
2662
2663         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2664                                   VIRTIO_NET_CTRL_RX_PROMISC, sg))
2665                 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
2666                          vi->ctrl->promisc ? "en" : "dis");
2667
2668         sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
2669
2670         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2671                                   VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
2672                 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
2673                          vi->ctrl->allmulti ? "en" : "dis");
2674
2675         uc_count = netdev_uc_count(dev);
2676         mc_count = netdev_mc_count(dev);
2677         /* MAC filter - use one buffer for both lists */
2678         buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
2679                       (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
2680         mac_data = buf;
2681         if (!buf)
2682                 return;
2683
2684         sg_init_table(sg, 2);
2685
2686         /* Store the unicast list and count in the front of the buffer */
2687         mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
2688         i = 0;
2689         netdev_for_each_uc_addr(ha, dev)
2690                 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2691
2692         sg_set_buf(&sg[0], mac_data,
2693                    sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
2694
2695         /* multicast list and count fill the end */
2696         mac_data = (void *)&mac_data->macs[uc_count][0];
2697
2698         mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
2699         i = 0;
2700         netdev_for_each_mc_addr(ha, dev)
2701                 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2702
2703         sg_set_buf(&sg[1], mac_data,
2704                    sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
2705
2706         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2707                                   VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
2708                 dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
2709
2710         kfree(buf);
2711 }
2712
2713 static int virtnet_vlan_rx_add_vid(struct net_device *dev,
2714                                    __be16 proto, u16 vid)
2715 {
2716         struct virtnet_info *vi = netdev_priv(dev);
2717         struct scatterlist sg;
2718
2719         vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2720         sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2721
2722         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2723                                   VIRTIO_NET_CTRL_VLAN_ADD, &sg))
2724                 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
2725         return 0;
2726 }
2727
2728 static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
2729                                     __be16 proto, u16 vid)
2730 {
2731         struct virtnet_info *vi = netdev_priv(dev);
2732         struct scatterlist sg;
2733
2734         vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2735         sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2736
2737         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2738                                   VIRTIO_NET_CTRL_VLAN_DEL, &sg))
2739                 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
2740         return 0;
2741 }
2742
2743 static void virtnet_clean_affinity(struct virtnet_info *vi)
2744 {
2745         int i;
2746
2747         if (vi->affinity_hint_set) {
2748                 for (i = 0; i < vi->max_queue_pairs; i++) {
2749                         virtqueue_set_affinity(vi->rq[i].vq, NULL);
2750                         virtqueue_set_affinity(vi->sq[i].vq, NULL);
2751                 }
2752
2753                 vi->affinity_hint_set = false;
2754         }
2755 }
2756
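/* Spread the online CPUs over the queue pairs: each pair gets a contiguous
 * group of roughly num_cpu / curr_queue_pairs CPUs (the first "stragglers"
 * pairs take one extra), and the same mask is mirrored into the XPS map.
 */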
2757 static void virtnet_set_affinity(struct virtnet_info *vi)
2758 {
2759         cpumask_var_t mask;
2760         int stragglers;
2761         int group_size;
2762         int i, j, cpu;
2763         int num_cpu;
2764         int stride;
2765
2766         if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2767                 virtnet_clean_affinity(vi);
2768                 return;
2769         }
2770
2771         num_cpu = num_online_cpus();
2772         stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
2773         stragglers = num_cpu >= vi->curr_queue_pairs ?
2774                         num_cpu % vi->curr_queue_pairs :
2775                         0;
2776         cpu = cpumask_first(cpu_online_mask);
2777
2778         for (i = 0; i < vi->curr_queue_pairs; i++) {
2779                 group_size = stride + (i < stragglers ? 1 : 0);
2780
2781                 for (j = 0; j < group_size; j++) {
2782                         cpumask_set_cpu(cpu, mask);
2783                         cpu = cpumask_next_wrap(cpu, cpu_online_mask,
2784                                                 nr_cpu_ids, false);
2785                 }
2786                 virtqueue_set_affinity(vi->rq[i].vq, mask);
2787                 virtqueue_set_affinity(vi->sq[i].vq, mask);
2788                 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
2789                 cpumask_clear(mask);
2790         }
2791
2792         vi->affinity_hint_set = true;
2793         free_cpumask_var(mask);
2794 }
2795
2796 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
2797 {
2798         struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2799                                                    node);
2800         virtnet_set_affinity(vi);
2801         return 0;
2802 }
2803
2804 static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
2805 {
2806         struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2807                                                    node_dead);
2808         virtnet_set_affinity(vi);
2809         return 0;
2810 }
2811
2812 static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
2813 {
2814         struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2815                                                    node);
2816
2817         virtnet_clean_affinity(vi);
2818         return 0;
2819 }
2820
2821 static enum cpuhp_state virtionet_online;
2822
2823 static int virtnet_cpu_notif_add(struct virtnet_info *vi)
2824 {
2825         int ret;
2826
2827         ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
2828         if (ret)
2829                 return ret;
2830         ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2831                                                &vi->node_dead);
2832         if (!ret)
2833                 return ret;
2834         cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2835         return ret;
2836 }
2837
2838 static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
2839 {
2840         cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2841         cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2842                                             &vi->node_dead);
2843 }
2844
2845 static void virtnet_get_ringparam(struct net_device *dev,
2846                                   struct ethtool_ringparam *ring,
2847                                   struct kernel_ethtool_ringparam *kernel_ring,
2848                                   struct netlink_ext_ack *extack)
2849 {
2850         struct virtnet_info *vi = netdev_priv(dev);
2851
2852         ring->rx_max_pending = vi->rq[0].vq->num_max;
2853         ring->tx_max_pending = vi->sq[0].vq->num_max;
2854         ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2855         ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2856 }
2857
2858 static int virtnet_set_ringparam(struct net_device *dev,
2859                                  struct ethtool_ringparam *ring,
2860                                  struct kernel_ethtool_ringparam *kernel_ring,
2861                                  struct netlink_ext_ack *extack)
2862 {
2863         struct virtnet_info *vi = netdev_priv(dev);
2864         u32 rx_pending, tx_pending;
2865         struct receive_queue *rq;
2866         struct send_queue *sq;
2867         int i, err;
2868
2869         if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2870                 return -EINVAL;
2871
2872         rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2873         tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2874
2875         if (ring->rx_pending == rx_pending &&
2876             ring->tx_pending == tx_pending)
2877                 return 0;
2878
2879         if (ring->rx_pending > vi->rq[0].vq->num_max)
2880                 return -EINVAL;
2881
2882         if (ring->tx_pending > vi->sq[0].vq->num_max)
2883                 return -EINVAL;
2884
2885         for (i = 0; i < vi->max_queue_pairs; i++) {
2886                 rq = vi->rq + i;
2887                 sq = vi->sq + i;
2888
2889                 if (ring->tx_pending != tx_pending) {
2890                         err = virtnet_tx_resize(vi, sq, ring->tx_pending);
2891                         if (err)
2892                                 return err;
2893                 }
2894
2895                 if (ring->rx_pending != rx_pending) {
2896                         err = virtnet_rx_resize(vi, rq, ring->rx_pending);
2897                         if (err)
2898                                 return err;
2899                 }
2900         }
2901
2902         return 0;
2903 }
2904
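/* Push the current RSS/hash-report configuration to the device: the fixed
 * header, the indirection table, the max_tx_vq..key fields and the hash key
 * are sent as four scatterlist entries on the MQ control command.
 */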
2905 static bool virtnet_commit_rss_command(struct virtnet_info *vi)
2906 {
2907         struct net_device *dev = vi->dev;
2908         struct scatterlist sgs[4];
2909         unsigned int sg_buf_size;
2910
2911         /* prepare sgs */
2912         sg_init_table(sgs, 4);
2913
2914         sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
2915         sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
2916
2917         sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
2918         sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
2919
2920         sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
2921                         - offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
2922         sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
2923
2924         sg_buf_size = vi->rss_key_size;
2925         sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
2926
2927         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
2928                                   vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
2929                                   : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
2930                 dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
2931                 return false;
2932         }
2933         return true;
2934 }
2935
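/* Populate the default RSS state: enable all hash types the device supports,
 * spread the indirection table over the current queue pairs and generate a
 * random hash key.
 */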
2936 static void virtnet_init_default_rss(struct virtnet_info *vi)
2937 {
2938         u32 indir_val = 0;
2939         int i = 0;
2940
2941         vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
2942         vi->rss_hash_types_saved = vi->rss_hash_types_supported;
2943         vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
2944                                                 ? vi->rss_indir_table_size - 1 : 0;
2945         vi->ctrl->rss.unclassified_queue = 0;
2946
2947         for (; i < vi->rss_indir_table_size; ++i) {
2948                 indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
2949                 vi->ctrl->rss.indirection_table[i] = indir_val;
2950         }
2951
2952         vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
2953         vi->ctrl->rss.hash_key_length = vi->rss_key_size;
2954
2955         netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
2956 }
2957
2958 static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
2959 {
2960         info->data = 0;
2961         switch (info->flow_type) {
2962         case TCP_V4_FLOW:
2963                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
2964                         info->data = RXH_IP_SRC | RXH_IP_DST |
2965                                                  RXH_L4_B_0_1 | RXH_L4_B_2_3;
2966                 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
2967                         info->data = RXH_IP_SRC | RXH_IP_DST;
2968                 }
2969                 break;
2970         case TCP_V6_FLOW:
2971                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
2972                         info->data = RXH_IP_SRC | RXH_IP_DST |
2973                                                  RXH_L4_B_0_1 | RXH_L4_B_2_3;
2974                 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
2975                         info->data = RXH_IP_SRC | RXH_IP_DST;
2976                 }
2977                 break;
2978         case UDP_V4_FLOW:
2979                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
2980                         info->data = RXH_IP_SRC | RXH_IP_DST |
2981                                                  RXH_L4_B_0_1 | RXH_L4_B_2_3;
2982                 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
2983                         info->data = RXH_IP_SRC | RXH_IP_DST;
2984                 }
2985                 break;
2986         case UDP_V6_FLOW:
2987                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
2988                         info->data = RXH_IP_SRC | RXH_IP_DST |
2989                                                  RXH_L4_B_0_1 | RXH_L4_B_2_3;
2990                 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
2991                         info->data = RXH_IP_SRC | RXH_IP_DST;
2992                 }
2993                 break;
2994         case IPV4_FLOW:
2995                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
2996                         info->data = RXH_IP_SRC | RXH_IP_DST;
2997
2998                 break;
2999         case IPV6_FLOW:
3000                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
3001                         info->data = RXH_IP_SRC | RXH_IP_DST;
3002
3003                 break;
3004         default:
3005                 info->data = 0;
3006                 break;
3007         }
3008 }
3009
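/* Illustrative usage only ("eth0" is a placeholder): the handler below
 * corresponds to configuring the receive hash fields with ethtool, e.g.
 *
 *	ethtool -N eth0 rx-flow-hash tcp4 sdfn
 *	ethtool -N eth0 rx-flow-hash udp6 sd
 *
 * Only the 'sd', 'sdfn' and discard ('r') combinations are accepted.
 */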
3010 static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
3011 {
3012         u32 new_hashtypes = vi->rss_hash_types_saved;
3013         bool is_disable = info->data & RXH_DISCARD;
3014         bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
3015
3016         /* Supports only 'sd' (IP src/dst), 'sdfn' (IP src/dst plus L4 ports) and 'r' (discard) */
3017         if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
3018                 return false;
3019
3020         switch (info->flow_type) {
3021         case TCP_V4_FLOW:
3022                 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
3023                 if (!is_disable)
3024                         new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3025                                 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
3026                 break;
3027         case UDP_V4_FLOW:
3028                 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
3029                 if (!is_disable)
3030                         new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3031                                 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
3032                 break;
3033         case IPV4_FLOW:
3034                 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3035                 if (!is_disable)
3036                         new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3037                 break;
3038         case TCP_V6_FLOW:
3039                 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
3040                 if (!is_disable)
3041                         new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3042                                 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
3043                 break;
3044         case UDP_V6_FLOW:
3045                 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
3046                 if (!is_disable)
3047                         new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3048                                 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
3049                 break;
3050         case IPV6_FLOW:
3051                 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3052                 if (!is_disable)
3053                         new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3054                 break;
3055         default:
3056                 /* unsupported flow */
3057                 return false;
3058         }
3059
3060         /* if unsupported hashtype was set */
3061         if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
3062                 return false;
3063
3064         if (new_hashtypes != vi->rss_hash_types_saved) {
3065                 vi->rss_hash_types_saved = new_hashtypes;
3066                 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
3067                 if (vi->dev->features & NETIF_F_RXHASH)
3068                         return virtnet_commit_rss_command(vi);
3069         }
3070
3071         return true;
3072 }
3073
3074 static void virtnet_get_drvinfo(struct net_device *dev,
3075                                 struct ethtool_drvinfo *info)
3076 {
3077         struct virtnet_info *vi = netdev_priv(dev);
3078         struct virtio_device *vdev = vi->vdev;
3079
3080         strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
3081         strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
3082         strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
3084 }
3085
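/* Illustrative usage only ("eth0" is a placeholder): the channel count below
 * is the number of combined RX/TX queue pairs and maps to
 *
 *	ethtool -l eth0
 *	ethtool -L eth0 combined 4
 *
 * Separate rx/tx/other channel counts are rejected, and the count cannot be
 * changed while an XDP program is attached.
 */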
3086 /* TODO: Eliminate OOO packets during switching */
3087 static int virtnet_set_channels(struct net_device *dev,
3088                                 struct ethtool_channels *channels)
3089 {
3090         struct virtnet_info *vi = netdev_priv(dev);
3091         u16 queue_pairs = channels->combined_count;
3092         int err;
3093
3094         /* We don't support separate rx/tx channels.
3095          * We don't allow setting 'other' channels.
3096          */
3097         if (channels->rx_count || channels->tx_count || channels->other_count)
3098                 return -EINVAL;
3099
3100         if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
3101                 return -EINVAL;
3102
3103         /* For now we don't support modifying channels while XDP is loaded.
3104          * Also, when XDP is loaded all RX queues have XDP programs, so we
3105          * only need to check a single RX queue.
3106          */
3107         if (vi->rq[0].xdp_prog)
3108                 return -EINVAL;
3109
3110         cpus_read_lock();
3111         err = _virtnet_set_queues(vi, queue_pairs);
3112         if (err) {
3113                 cpus_read_unlock();
3114                 goto err;
3115         }
3116         virtnet_set_affinity(vi);
3117         cpus_read_unlock();
3118
3119         netif_set_real_num_tx_queues(dev, queue_pairs);
3120         netif_set_real_num_rx_queues(dev, queue_pairs);
3121  err:
3122         return err;
3123 }
3124
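/* The statistics below are exported per queue, named rx_queue_<i>_<stat> and
 * tx_queue_<i>_<stat>, and can be read from userspace with e.g.
 * "ethtool -S eth0" (illustrative; "eth0" is a placeholder). The reader in
 * virtnet_get_ethtool_stats() uses the u64_stats seqcount to retry if a
 * per-queue update races with it.
 */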
3125 static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3126 {
3127         struct virtnet_info *vi = netdev_priv(dev);
3128         unsigned int i, j;
3129         u8 *p = data;
3130
3131         switch (stringset) {
3132         case ETH_SS_STATS:
3133                 for (i = 0; i < vi->curr_queue_pairs; i++) {
3134                         for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
3135                                 ethtool_sprintf(&p, "rx_queue_%u_%s", i,
3136                                                 virtnet_rq_stats_desc[j].desc);
3137                 }
3138
3139                 for (i = 0; i < vi->curr_queue_pairs; i++) {
3140                         for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
3141                                 ethtool_sprintf(&p, "tx_queue_%u_%s", i,
3142                                                 virtnet_sq_stats_desc[j].desc);
3143                 }
3144                 break;
3145         }
3146 }
3147
3148 static int virtnet_get_sset_count(struct net_device *dev, int sset)
3149 {
3150         struct virtnet_info *vi = netdev_priv(dev);
3151
3152         switch (sset) {
3153         case ETH_SS_STATS:
3154                 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
3155                                                VIRTNET_SQ_STATS_LEN);
3156         default:
3157                 return -EOPNOTSUPP;
3158         }
3159 }
3160
3161 static void virtnet_get_ethtool_stats(struct net_device *dev,
3162                                       struct ethtool_stats *stats, u64 *data)
3163 {
3164         struct virtnet_info *vi = netdev_priv(dev);
3165         unsigned int idx = 0, start, i, j;
3166         const u8 *stats_base;
3167         size_t offset;
3168
3169         for (i = 0; i < vi->curr_queue_pairs; i++) {
3170                 struct receive_queue *rq = &vi->rq[i];
3171
3172                 stats_base = (u8 *)&rq->stats;
3173                 do {
3174                         start = u64_stats_fetch_begin(&rq->stats.syncp);
3175                         for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
3176                                 offset = virtnet_rq_stats_desc[j].offset;
3177                                 data[idx + j] = *(u64 *)(stats_base + offset);
3178                         }
3179                 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
3180                 idx += VIRTNET_RQ_STATS_LEN;
3181         }
3182
3183         for (i = 0; i < vi->curr_queue_pairs; i++) {
3184                 struct send_queue *sq = &vi->sq[i];
3185
3186                 stats_base = (u8 *)&sq->stats;
3187                 do {
3188                         start = u64_stats_fetch_begin(&sq->stats.syncp);
3189                         for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
3190                                 offset = virtnet_sq_stats_desc[j].offset;
3191                                 data[idx + j] = *(u64 *)(stats_base + offset);
3192                         }
3193                 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
3194                 idx += VIRTNET_SQ_STATS_LEN;
3195         }
3196 }
3197
3198 static void virtnet_get_channels(struct net_device *dev,
3199                                  struct ethtool_channels *channels)
3200 {
3201         struct virtnet_info *vi = netdev_priv(dev);
3202
3203         channels->combined_count = vi->curr_queue_pairs;
3204         channels->max_combined = vi->max_queue_pairs;
3205         channels->max_other = 0;
3206         channels->rx_count = 0;
3207         channels->tx_count = 0;
3208         channels->other_count = 0;
3209 }
3210
3211 static int virtnet_set_link_ksettings(struct net_device *dev,
3212                                       const struct ethtool_link_ksettings *cmd)
3213 {
3214         struct virtnet_info *vi = netdev_priv(dev);
3215
3216         return ethtool_virtdev_set_link_ksettings(dev, cmd,
3217                                                   &vi->speed, &vi->duplex);
3218 }
3219
3220 static int virtnet_get_link_ksettings(struct net_device *dev,
3221                                       struct ethtool_link_ksettings *cmd)
3222 {
3223         struct virtnet_info *vi = netdev_priv(dev);
3224
3225         cmd->base.speed = vi->speed;
3226         cmd->base.duplex = vi->duplex;
3227         cmd->base.port = PORT_OTHER;
3228
3229         return 0;
3230 }
3231
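/* Device-wide interrupt coalescing is configured through the control
 * virtqueue when VIRTIO_NET_F_NOTF_COAL is negotiated. From userspace this
 * corresponds to something like (illustrative; "eth0" is a placeholder):
 *
 *	ethtool -C eth0 rx-usecs 50 rx-frames 64 tx-usecs 50 tx-frames 64
 *
 * The saved parameters are what virtnet_get_coalesce() later reports back.
 */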
3232 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
3233                                        struct ethtool_coalesce *ec)
3234 {
3235         struct scatterlist sgs_tx, sgs_rx;
3236
3237         vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
3238         vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
3239         sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
3240
3241         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3242                                   VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
3243                                   &sgs_tx))
3244                 return -EINVAL;
3245
3246         /* Save parameters */
3247         vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
3248         vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
3249
3250         vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
3251         vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
3252         sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
3253
3254         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3255                                   VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
3256                                   &sgs_rx))
3257                 return -EINVAL;
3258
3259         /* Save parameters */
3260         vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
3261         vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
3262
3263         return 0;
3264 }
3265
3266 static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
3267                                          u16 vqn, u32 max_usecs, u32 max_packets)
3268 {
3269         struct scatterlist sgs;
3270
3271         vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
3272         vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
3273         vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
3274         sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
3275
3276         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3277                                   VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
3278                                   &sgs))
3279                 return -EINVAL;
3280
3281         return 0;
3282 }
3283
3284 static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
3285                                           struct ethtool_coalesce *ec,
3286                                           u16 queue)
3287 {
3288         int err;
3289
3290         if (ec->rx_coalesce_usecs || ec->rx_max_coalesced_frames) {
3291                 err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
3292                                                     ec->rx_coalesce_usecs,
3293                                                     ec->rx_max_coalesced_frames);
3294                 if (err)
3295                         return err;
3296                 /* Save parameters */
3297                 vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
3298                 vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
3299         }
3300
3301         if (ec->tx_coalesce_usecs || ec->tx_max_coalesced_frames) {
3302                 err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
3303                                                     ec->tx_coalesce_usecs,
3304                                                     ec->tx_max_coalesced_frames);
3305                 if (err)
3306                         return err;
3307                 /* Save parameters */
3308                 vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
3309                 vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
3310         }
3311
3312         return 0;
3313 }
3314
3315 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
3316 {
3317         /* usecs coalescing is supported only if the VIRTIO_NET_F_NOTF_COAL
3318          * feature is negotiated.
3319          */
3320         if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
3321                 return -EOPNOTSUPP;
3322
3323         if (ec->tx_max_coalesced_frames > 1 ||
3324             ec->rx_max_coalesced_frames != 1)
3325                 return -EINVAL;
3326
3327         return 0;
3328 }
3329
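/* Helper used by the coalescing handlers below: a tx-frames setting of 0
 * translates into a TX NAPI weight of 0 (TX NAPI disabled), and that toggle
 * is only allowed while the interface is down, hence -EBUSY when IFF_UP is
 * set.
 */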
3330 static int virtnet_should_update_vq_weight(int dev_flags, int weight,
3331                                            int vq_weight, bool *should_update)
3332 {
3333         if (weight ^ vq_weight) {
3334                 if (dev_flags & IFF_UP)
3335                         return -EBUSY;
3336                 *should_update = true;
3337         }
3338
3339         return 0;
3340 }
3341
3342 static int virtnet_set_coalesce(struct net_device *dev,
3343                                 struct ethtool_coalesce *ec,
3344                                 struct kernel_ethtool_coalesce *kernel_coal,
3345                                 struct netlink_ext_ack *extack)
3346 {
3347         struct virtnet_info *vi = netdev_priv(dev);
3348         int ret, queue_number, napi_weight;
3349         bool update_napi = false;
3350
3351         /* Can't change NAPI weight while the interface is up */
3352         napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3353         for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) {
3354                 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
3355                                                       vi->sq[queue_number].napi.weight,
3356                                                       &update_napi);
3357                 if (ret)
3358                         return ret;
3359
3360                 if (update_napi) {
3361                         /* All queues in [queue_number, vi->max_queue_pairs) will be
3362                          * updated for the sake of simplicity, though not all may need it.
3363                          */
3364                         break;
3365                 }
3366         }
3367
3368         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
3369                 ret = virtnet_send_notf_coal_cmds(vi, ec);
3370         else
3371                 ret = virtnet_coal_params_supported(ec);
3372
3373         if (ret)
3374                 return ret;
3375
3376         if (update_napi) {
3377                 for (; queue_number < vi->max_queue_pairs; queue_number++)
3378                         vi->sq[queue_number].napi.weight = napi_weight;
3379         }
3380
3381         return ret;
3382 }
3383
3384 static int virtnet_get_coalesce(struct net_device *dev,
3385                                 struct ethtool_coalesce *ec,
3386                                 struct kernel_ethtool_coalesce *kernel_coal,
3387                                 struct netlink_ext_ack *extack)
3388 {
3389         struct virtnet_info *vi = netdev_priv(dev);
3390
3391         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
3392                 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs;
3393                 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
3394                 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
3395                 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
3396         } else {
3397                 ec->rx_max_coalesced_frames = 1;
3398
3399                 if (vi->sq[0].napi.weight)
3400                         ec->tx_max_coalesced_frames = 1;
3401         }
3402
3403         return 0;
3404 }
3405
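/* Per-queue coalescing uses VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET and maps to
 * ethtool's per-queue interface, e.g. (illustrative; "eth0" is a placeholder):
 *
 *	ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 10
 *	ethtool --per-queue eth0 queue_mask 0x1 --show-coalesce
 */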
3406 static int virtnet_set_per_queue_coalesce(struct net_device *dev,
3407                                           u32 queue,
3408                                           struct ethtool_coalesce *ec)
3409 {
3410         struct virtnet_info *vi = netdev_priv(dev);
3411         int ret, napi_weight;
3412         bool update_napi = false;
3413
3414         if (queue >= vi->max_queue_pairs)
3415                 return -EINVAL;
3416
3417         /* Can't change NAPI weight while the interface is up */
3418         napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3419         ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
3420                                               vi->sq[queue].napi.weight,
3421                                               &update_napi);
3422         if (ret)
3423                 return ret;
3424
3425         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3426                 ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue);
3427         else
3428                 ret = virtnet_coal_params_supported(ec);
3429
3430         if (ret)
3431                 return ret;
3432
3433         if (update_napi)
3434                 vi->sq[queue].napi.weight = napi_weight;
3435
3436         return 0;
3437 }
3438
3439 static int virtnet_get_per_queue_coalesce(struct net_device *dev,
3440                                           u32 queue,
3441                                           struct ethtool_coalesce *ec)
3442 {
3443         struct virtnet_info *vi = netdev_priv(dev);
3444
3445         if (queue >= vi->max_queue_pairs)
3446                 return -EINVAL;
3447
3448         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
3449                 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
3450                 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
3451                 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
3452                 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
3453         } else {
3454                 ec->rx_max_coalesced_frames = 1;
3455
3456                 if (vi->sq[0].napi.weight)
3457                         ec->tx_max_coalesced_frames = 1;
3458         }
3459
3460         return 0;
3461 }
3462
3463 static void virtnet_init_settings(struct net_device *dev)
3464 {
3465         struct virtnet_info *vi = netdev_priv(dev);
3466
3467         vi->speed = SPEED_UNKNOWN;
3468         vi->duplex = DUPLEX_UNKNOWN;
3469 }
3470
3471 static void virtnet_update_settings(struct virtnet_info *vi)
3472 {
3473         u32 speed;
3474         u8 duplex;
3475
3476         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
3477                 return;
3478
3479         virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
3480
3481         if (ethtool_validate_speed(speed))
3482                 vi->speed = speed;
3483
3484         virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
3485
3486         if (ethtool_validate_duplex(duplex))
3487                 vi->duplex = duplex;
3488 }
3489
3490 static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
3491 {
3492         return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
3493 }
3494
3495 static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
3496 {
3497         return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
3498 }
3499
3500 static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
3501 {
3502         struct virtnet_info *vi = netdev_priv(dev);
3503         int i;
3504
3505         if (indir) {
3506                 for (i = 0; i < vi->rss_indir_table_size; ++i)
3507                         indir[i] = vi->ctrl->rss.indirection_table[i];
3508         }
3509
3510         if (key)
3511                 memcpy(key, vi->ctrl->rss.key, vi->rss_key_size);
3512
3513         if (hfunc)
3514                 *hfunc = ETH_RSS_HASH_TOP;
3515
3516         return 0;
3517 }
3518
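/* The RSS key and indirection table below are read and written through the
 * standard rxfh interface; only the Toeplitz ("top") hash function is
 * supported. Illustrative usage ("eth0" is a placeholder):
 *
 *	ethtool -x eth0
 *	ethtool -X eth0 equal 4
 */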
3519 static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
3520 {
3521         struct virtnet_info *vi = netdev_priv(dev);
3522         int i;
3523
3524         if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
3525                 return -EOPNOTSUPP;
3526
3527         if (indir) {
3528                 for (i = 0; i < vi->rss_indir_table_size; ++i)
3529                         vi->ctrl->rss.indirection_table[i] = indir[i];
3530         }
3531         if (key)
3532                 memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
3533
3534         virtnet_commit_rss_command(vi);
3535
3536         return 0;
3537 }
3538
3539 static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
3540 {
3541         struct virtnet_info *vi = netdev_priv(dev);
3542         int rc = 0;
3543
3544         switch (info->cmd) {
3545         case ETHTOOL_GRXRINGS:
3546                 info->data = vi->curr_queue_pairs;
3547                 break;
3548         case ETHTOOL_GRXFH:
3549                 virtnet_get_hashflow(vi, info);
3550                 break;
3551         default:
3552                 rc = -EOPNOTSUPP;
3553         }
3554
3555         return rc;
3556 }
3557
3558 static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
3559 {
3560         struct virtnet_info *vi = netdev_priv(dev);
3561         int rc = 0;
3562
3563         switch (info->cmd) {
3564         case ETHTOOL_SRXFH:
3565                 if (!virtnet_set_hashflow(vi, info))
3566                         rc = -EINVAL;
3567
3568                 break;
3569         default:
3570                 rc = -EOPNOTSUPP;
3571         }
3572
3573         return rc;
3574 }
3575
3576 static const struct ethtool_ops virtnet_ethtool_ops = {
3577         .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
3578                 ETHTOOL_COALESCE_USECS,
3579         .get_drvinfo = virtnet_get_drvinfo,
3580         .get_link = ethtool_op_get_link,
3581         .get_ringparam = virtnet_get_ringparam,
3582         .set_ringparam = virtnet_set_ringparam,
3583         .get_strings = virtnet_get_strings,
3584         .get_sset_count = virtnet_get_sset_count,
3585         .get_ethtool_stats = virtnet_get_ethtool_stats,
3586         .set_channels = virtnet_set_channels,
3587         .get_channels = virtnet_get_channels,
3588         .get_ts_info = ethtool_op_get_ts_info,
3589         .get_link_ksettings = virtnet_get_link_ksettings,
3590         .set_link_ksettings = virtnet_set_link_ksettings,
3591         .set_coalesce = virtnet_set_coalesce,
3592         .get_coalesce = virtnet_get_coalesce,
3593         .set_per_queue_coalesce = virtnet_set_per_queue_coalesce,
3594         .get_per_queue_coalesce = virtnet_get_per_queue_coalesce,
3595         .get_rxfh_key_size = virtnet_get_rxfh_key_size,
3596         .get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
3597         .get_rxfh = virtnet_get_rxfh,
3598         .set_rxfh = virtnet_set_rxfh,
3599         .get_rxnfc = virtnet_get_rxnfc,
3600         .set_rxnfc = virtnet_set_rxnfc,
3601 };
3602
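/* Suspend/resume helpers: on freeze the config work is flushed and the
 * device detached before the interface is closed; on restore the virtqueues
 * are rebuilt with init_vqs() before the device is marked ready and the
 * interface reopened.
 */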
3603 static void virtnet_freeze_down(struct virtio_device *vdev)
3604 {
3605         struct virtnet_info *vi = vdev->priv;
3606
3607         /* Make sure no work handler is accessing the device */
3608         flush_work(&vi->config_work);
3609
3610         netif_tx_lock_bh(vi->dev);
3611         netif_device_detach(vi->dev);
3612         netif_tx_unlock_bh(vi->dev);
3613         if (netif_running(vi->dev))
3614                 virtnet_close(vi->dev);
3615 }
3616
3617 static int init_vqs(struct virtnet_info *vi);
3618
3619 static int virtnet_restore_up(struct virtio_device *vdev)
3620 {
3621         struct virtnet_info *vi = vdev->priv;
3622         int err;
3623
3624         err = init_vqs(vi);
3625         if (err)
3626                 return err;
3627
3628         virtio_device_ready(vdev);
3629
3630         enable_delayed_refill(vi);
3631
3632         if (netif_running(vi->dev)) {
3633                 err = virtnet_open(vi->dev);
3634                 if (err)
3635                         return err;
3636         }
3637
3638         netif_tx_lock_bh(vi->dev);
3639         netif_device_attach(vi->dev);
3640         netif_tx_unlock_bh(vi->dev);
3641         return err;
3642 }
3643
3644 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
3645 {
3646         struct scatterlist sg;
3647         vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
3648
3649         sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
3650
3651         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
3652                                   VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
3653                 dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
3654                 return -EINVAL;
3655         }
3656
3657         return 0;
3658 }
3659
3660 static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
3661 {
3662         u64 offloads = 0;
3663
3664         if (!vi->guest_offloads)
3665                 return 0;
3666
3667         return virtnet_set_guest_offloads(vi, offloads);
3668 }
3669
3670 static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
3671 {
3672         u64 offloads = vi->guest_offloads;
3673
3674         if (!vi->guest_offloads)
3675                 return 0;
3676
3677         return virtnet_set_guest_offloads(vi, offloads);
3678 }
3679
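/* XDP attach path. An illustrative way to load a program from userspace
 * (object and section names are placeholders):
 *
 *	ip link set dev eth0 xdp obj xdp_prog.o sec xdp
 *
 * The checks below require that guest offloads are switchable (or already
 * off), that any_header_sg is available when mergeable buffers are in use,
 * and that the MTU fits a single page unless the program supports frags.
 */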
3680 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
3681                            struct netlink_ext_ack *extack)
3682 {
3683         unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
3684                                            sizeof(struct skb_shared_info));
3685         unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
3686         struct virtnet_info *vi = netdev_priv(dev);
3687         struct bpf_prog *old_prog;
3688         u16 xdp_qp = 0, curr_qp;
3689         int i, err;
3690
3691         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
3692             && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3693                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
3694                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
3695                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
3696                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) ||
3697                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) ||
3698                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) {
3699                 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
3700                 return -EOPNOTSUPP;
3701         }
3702
3703         if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
3704                 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
3705                 return -EINVAL;
3706         }
3707
3708         if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
3709                 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
3710                 netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz);
3711                 return -EINVAL;
3712         }
3713
3714         curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
3715         if (prog)
3716                 xdp_qp = nr_cpu_ids;
3717
3718         /* XDP requires extra queues for XDP_TX */
3719         if (curr_qp + xdp_qp > vi->max_queue_pairs) {
3720                 netdev_warn_once(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
3721                                  curr_qp + xdp_qp, vi->max_queue_pairs);
3722                 xdp_qp = 0;
3723         }
3724
3725         old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
3726         if (!prog && !old_prog)
3727                 return 0;
3728
3729         if (prog)
3730                 bpf_prog_add(prog, vi->max_queue_pairs - 1);
3731
3732         /* Make sure NAPI is not using any XDP TX queues for RX. */
3733         if (netif_running(dev)) {
3734                 for (i = 0; i < vi->max_queue_pairs; i++) {
3735                         napi_disable(&vi->rq[i].napi);
3736                         virtnet_napi_tx_disable(&vi->sq[i].napi);
3737                 }
3738         }
3739
3740         if (!prog) {
3741                 for (i = 0; i < vi->max_queue_pairs; i++) {
3742                         rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
3743                         if (i == 0)
3744                                 virtnet_restore_guest_offloads(vi);
3745                 }
3746                 synchronize_net();
3747         }
3748
3749         err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
3750         if (err)
3751                 goto err;
3752         netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
3753         vi->xdp_queue_pairs = xdp_qp;
3754
3755         if (prog) {
3756                 vi->xdp_enabled = true;
3757                 for (i = 0; i < vi->max_queue_pairs; i++) {
3758                         rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
3759                         if (i == 0 && !old_prog)
3760                                 virtnet_clear_guest_offloads(vi);
3761                 }
3762                 if (!old_prog)
3763                         xdp_features_set_redirect_target(dev, true);
3764         } else {
3765                 xdp_features_clear_redirect_target(dev);
3766                 vi->xdp_enabled = false;
3767         }
3768
3769         for (i = 0; i < vi->max_queue_pairs; i++) {
3770                 if (old_prog)
3771                         bpf_prog_put(old_prog);
3772                 if (netif_running(dev)) {
3773                         virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
3774                         virtnet_napi_tx_enable(vi, vi->sq[i].vq,
3775                                                &vi->sq[i].napi);
3776                 }
3777         }
3778
3779         return 0;
3780
3781 err:
3782         if (!prog) {
3783                 virtnet_clear_guest_offloads(vi);
3784                 for (i = 0; i < vi->max_queue_pairs; i++)
3785                         rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
3786         }
3787
3788         if (netif_running(dev)) {
3789                 for (i = 0; i < vi->max_queue_pairs; i++) {
3790                         virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
3791                         virtnet_napi_tx_enable(vi, vi->sq[i].vq,
3792                                                &vi->sq[i].napi);
3793                 }
3794         }
3795         if (prog)
3796                 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
3797         return err;
3798 }
3799
3800 static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3801 {
3802         switch (xdp->command) {
3803         case XDP_SETUP_PROG:
3804                 return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
3805         default:
3806                 return -EINVAL;
3807         }
3808 }
3809
3810 static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
3811                                       size_t len)
3812 {
3813         struct virtnet_info *vi = netdev_priv(dev);
3814         int ret;
3815
3816         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
3817                 return -EOPNOTSUPP;
3818
3819         ret = snprintf(buf, len, "sby");
3820         if (ret >= len)
3821                 return -EOPNOTSUPP;
3822
3823         return 0;
3824 }
3825
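/* The feature toggles handled below map to e.g. "ethtool -K eth0 rx-gro-hw off"
 * and "ethtool -K eth0 rxhash on" (illustrative; "eth0" is a placeholder).
 * GRO_HW changes are refused while XDP is enabled; RXHASH simply switches the
 * RSS hash_types between the saved set and VIRTIO_NET_HASH_REPORT_NONE.
 */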
3826 static int virtnet_set_features(struct net_device *dev,
3827                                 netdev_features_t features)
3828 {
3829         struct virtnet_info *vi = netdev_priv(dev);
3830         u64 offloads;
3831         int err;
3832
3833         if ((dev->features ^ features) & NETIF_F_GRO_HW) {
3834                 if (vi->xdp_enabled)
3835                         return -EBUSY;
3836
3837                 if (features & NETIF_F_GRO_HW)
3838                         offloads = vi->guest_offloads_capable;
3839                 else
3840                         offloads = vi->guest_offloads_capable &
3841                                    ~GUEST_OFFLOAD_GRO_HW_MASK;
3842
3843                 err = virtnet_set_guest_offloads(vi, offloads);
3844                 if (err)
3845                         return err;
3846                 vi->guest_offloads = offloads;
3847         }
3848
3849         if ((dev->features ^ features) & NETIF_F_RXHASH) {
3850                 if (features & NETIF_F_RXHASH)
3851                         vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
3852                 else
3853                         vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
3854
3855                 if (!virtnet_commit_rss_command(vi))
3856                         return -EINVAL;
3857         }
3858
3859         return 0;
3860 }
3861
3862 static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
3863 {
3864         struct virtnet_info *priv = netdev_priv(dev);
3865         struct send_queue *sq = &priv->sq[txqueue];
3866         struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
3867
3868         u64_stats_update_begin(&sq->stats.syncp);
3869         sq->stats.tx_timeouts++;
3870         u64_stats_update_end(&sq->stats.syncp);
3871
3872         netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
3873                    txqueue, sq->name, sq->vq->index, sq->vq->name,
3874                    jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
3875 }
3876
3877 static const struct net_device_ops virtnet_netdev = {
3878         .ndo_open            = virtnet_open,
3879         .ndo_stop            = virtnet_close,
3880         .ndo_start_xmit      = start_xmit,
3881         .ndo_validate_addr   = eth_validate_addr,
3882         .ndo_set_mac_address = virtnet_set_mac_address,
3883         .ndo_set_rx_mode     = virtnet_set_rx_mode,
3884         .ndo_get_stats64     = virtnet_stats,
3885         .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
3886         .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
3887         .ndo_bpf                = virtnet_xdp,
3888         .ndo_xdp_xmit           = virtnet_xdp_xmit,
3889         .ndo_features_check     = passthru_features_check,
3890         .ndo_get_phys_port_name = virtnet_get_phys_port_name,
3891         .ndo_set_features       = virtnet_set_features,
3892         .ndo_tx_timeout         = virtnet_tx_timeout,
3893 };
3894
3895 static void virtnet_config_changed_work(struct work_struct *work)
3896 {
3897         struct virtnet_info *vi =
3898                 container_of(work, struct virtnet_info, config_work);
3899         u16 v;
3900
3901         if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
3902                                  struct virtio_net_config, status, &v) < 0)
3903                 return;
3904
3905         if (v & VIRTIO_NET_S_ANNOUNCE) {
3906                 netdev_notify_peers(vi->dev);
3907                 virtnet_ack_link_announce(vi);
3908         }
3909
3910         /* Ignore unknown (future) status bits */
3911         v &= VIRTIO_NET_S_LINK_UP;
3912
3913         if (vi->status == v)
3914                 return;
3915
3916         vi->status = v;
3917
3918         if (vi->status & VIRTIO_NET_S_LINK_UP) {
3919                 virtnet_update_settings(vi);
3920                 netif_carrier_on(vi->dev);
3921                 netif_tx_wake_all_queues(vi->dev);
3922         } else {
3923                 netif_carrier_off(vi->dev);
3924                 netif_tx_stop_all_queues(vi->dev);
3925         }
3926 }
3927
3928 static void virtnet_config_changed(struct virtio_device *vdev)
3929 {
3930         struct virtnet_info *vi = vdev->priv;
3931
3932         schedule_work(&vi->config_work);
3933 }
3934
3935 static void virtnet_free_queues(struct virtnet_info *vi)
3936 {
3937         int i;
3938
3939         for (i = 0; i < vi->max_queue_pairs; i++) {
3940                 __netif_napi_del(&vi->rq[i].napi);
3941                 __netif_napi_del(&vi->sq[i].napi);
3942         }
3943
3944         /* We called __netif_napi_del(), so we need to respect an RCU
3945          * grace period before freeing vi->rq.
3946          */
3947         synchronize_net();
3948
3949         kfree(vi->rq);
3950         kfree(vi->sq);
3951         kfree(vi->ctrl);
3952 }
3953
3954 static void _free_receive_bufs(struct virtnet_info *vi)
3955 {
3956         struct bpf_prog *old_prog;
3957         int i;
3958
3959         for (i = 0; i < vi->max_queue_pairs; i++) {
3960                 while (vi->rq[i].pages)
3961                         __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
3962
3963                 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
3964                 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
3965                 if (old_prog)
3966                         bpf_prog_put(old_prog);
3967         }
3968 }
3969
3970 static void free_receive_bufs(struct virtnet_info *vi)
3971 {
3972         rtnl_lock();
3973         _free_receive_bufs(vi);
3974         rtnl_unlock();
3975 }
3976
3977 static void free_receive_page_frags(struct virtnet_info *vi)
3978 {
3979         int i;
3980         for (i = 0; i < vi->max_queue_pairs; i++)
3981                 if (vi->rq[i].alloc_frag.page) {
3982                         if (vi->rq[i].do_dma && vi->rq[i].last_dma)
3983                                 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
3984                         put_page(vi->rq[i].alloc_frag.page);
3985                 }
3986 }
3987
3988 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
3989 {
3990         if (!is_xdp_frame(buf))
3991                 dev_kfree_skb(buf);
3992         else
3993                 xdp_return_frame(ptr_to_xdp(buf));
3994 }
3995
3996 static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
3997 {
3998         struct virtnet_info *vi = vq->vdev->priv;
3999         int i = vq2rxq(vq);
4000
4001         if (vi->mergeable_rx_bufs)
4002                 put_page(virt_to_head_page(buf));
4003         else if (vi->big_packets)
4004                 give_pages(&vi->rq[i], buf);
4005         else
4006                 put_page(virt_to_head_page(buf));
4007 }
4008
4009 static void free_unused_bufs(struct virtnet_info *vi)
4010 {
4011         void *buf;
4012         int i;
4013
4014         for (i = 0; i < vi->max_queue_pairs; i++) {
4015                 struct virtqueue *vq = vi->sq[i].vq;
4016                 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
4017                         virtnet_sq_free_unused_buf(vq, buf);
4018                 cond_resched();
4019         }
4020
4021         for (i = 0; i < vi->max_queue_pairs; i++) {
4022                 struct receive_queue *rq = &vi->rq[i];
4023
4024                 while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
4025                         virtnet_rq_free_unused_buf(rq->vq, buf);
4026                 cond_resched();
4027         }
4028 }
4029
4030 static void virtnet_del_vqs(struct virtnet_info *vi)
4031 {
4032         struct virtio_device *vdev = vi->vdev;
4033
4034         virtnet_clean_affinity(vi);
4035
4036         vdev->config->del_vqs(vdev);
4037
4038         virtnet_free_queues(vi);
4039 }
4040
4041 /* How large should a single buffer be so a queue full of these can fit at
4042  * least one full packet?
4043  * Logic below assumes the mergeable buffer header is used.
4044  */
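/* Rough illustration (assumed values, not taken from a real device): with a
 * 12-byte mergeable header, a max_mtu of 65535 and a 256-entry ring,
 * buf_len = 12 + 14 + 4 + 65535 = 65565 and DIV_ROUND_UP(65565, 256) = 257,
 * so the result is clamped up to GOOD_PACKET_LEN (1518 here).
 */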
4045 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
4046 {
4047         const unsigned int hdr_len = vi->hdr_len;
4048         unsigned int rq_size = virtqueue_get_vring_size(vq);
4049         unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
4050         unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
4051         unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
4052
4053         return max(max(min_buf_len, hdr_len) - hdr_len,
4054                    (unsigned int)GOOD_PACKET_LEN);
4055 }
4056
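/* Illustrative layout (assuming the usual rxq2vq(i) = 2*i, txq2vq(i) = 2*i+1
 * mapping): with 2 queue pairs and a control virtqueue, total_vqs is 5 and
 * the virtqueues are ordered rx0, tx0, rx1, tx1, ctrl.
 */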
4057 static int virtnet_find_vqs(struct virtnet_info *vi)
4058 {
4059         vq_callback_t **callbacks;
4060         struct virtqueue **vqs;
4061         int ret = -ENOMEM;
4062         int i, total_vqs;
4063         const char **names;
4064         bool *ctx;
4065
4066         /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
4067          * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
4068          * possible control vq.
4069          */
4070         total_vqs = vi->max_queue_pairs * 2 +
4071                     virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
4072
4073         /* Allocate space for find_vqs parameters */
4074         vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
4075         if (!vqs)
4076                 goto err_vq;
4077         callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
4078         if (!callbacks)
4079                 goto err_callback;
4080         names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
4081         if (!names)
4082                 goto err_names;
4083         if (!vi->big_packets || vi->mergeable_rx_bufs) {
4084                 ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
4085                 if (!ctx)
4086                         goto err_ctx;
4087         } else {
4088                 ctx = NULL;
4089         }
4090
4091         /* Parameters for control virtqueue, if any */
4092         if (vi->has_cvq) {
4093                 callbacks[total_vqs - 1] = NULL;
4094                 names[total_vqs - 1] = "control";
4095         }
4096
4097         /* Allocate/initialize parameters for send/receive virtqueues */
4098         for (i = 0; i < vi->max_queue_pairs; i++) {
4099                 callbacks[rxq2vq(i)] = skb_recv_done;
4100                 callbacks[txq2vq(i)] = skb_xmit_done;
4101                 sprintf(vi->rq[i].name, "input.%d", i);
4102                 sprintf(vi->sq[i].name, "output.%d", i);
4103                 names[rxq2vq(i)] = vi->rq[i].name;
4104                 names[txq2vq(i)] = vi->sq[i].name;
4105                 if (ctx)
4106                         ctx[rxq2vq(i)] = true;
4107         }
4108
4109         ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
4110                                   names, ctx, NULL);
4111         if (ret)
4112                 goto err_find;
4113
4114         if (vi->has_cvq) {
4115                 vi->cvq = vqs[total_vqs - 1];
4116                 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
4117                         vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4118         }
4119
4120         for (i = 0; i < vi->max_queue_pairs; i++) {
4121                 vi->rq[i].vq = vqs[rxq2vq(i)];
4122                 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
4123                 vi->sq[i].vq = vqs[txq2vq(i)];
4124         }
4125
4126         /* Success: fall through (ret == 0) and free the temporary arrays. */
4127
4129 err_find:
4130         kfree(ctx);
4131 err_ctx:
4132         kfree(names);
4133 err_names:
4134         kfree(callbacks);
4135 err_callback:
4136         kfree(vqs);
4137 err_vq:
4138         return ret;
4139 }
4140
4141 static int virtnet_alloc_queues(struct virtnet_info *vi)
4142 {
4143         int i;
4144
4145         if (vi->has_cvq) {
4146                 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
4147                 if (!vi->ctrl)
4148                         goto err_ctrl;
4149         } else {
4150                 vi->ctrl = NULL;
4151         }
4152         vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
4153         if (!vi->sq)
4154                 goto err_sq;
4155         vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
4156         if (!vi->rq)
4157                 goto err_rq;
4158
4159         INIT_DELAYED_WORK(&vi->refill, refill_work);
4160         for (i = 0; i < vi->max_queue_pairs; i++) {
4161                 vi->rq[i].pages = NULL;
4162                 netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
4163                                       napi_weight);
4164                 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
4165                                          virtnet_poll_tx,
4166                                          napi_tx ? napi_weight : 0);
4167
4168                 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
4169                 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
4170                 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
4171
4172                 u64_stats_init(&vi->rq[i].stats.syncp);
4173                 u64_stats_init(&vi->sq[i].stats.syncp);
4174         }
4175
4176         return 0;
4177
4178 err_rq:
4179         kfree(vi->sq);
4180 err_sq:
4181         kfree(vi->ctrl);
4182 err_ctrl:
4183         return -ENOMEM;
4184 }
4185
4186 static int init_vqs(struct virtnet_info *vi)
4187 {
4188         int ret;
4189
4190         /* Allocate send & receive queues */
4191         ret = virtnet_alloc_queues(vi);
4192         if (ret)
4193                 goto err;
4194
4195         ret = virtnet_find_vqs(vi);
4196         if (ret)
4197                 goto err_free;
4198
4199         virtnet_rq_set_premapped(vi);
4200
4201         cpus_read_lock();
4202         virtnet_set_affinity(vi);
4203         cpus_read_unlock();
4204
4205         return 0;
4206
4207 err_free:
4208         virtnet_free_queues(vi);
4209 err:
4210         return ret;
4211 }
4212
4213 #ifdef CONFIG_SYSFS
4214 static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
4215                 char *buf)
4216 {
4217         struct virtnet_info *vi = netdev_priv(queue->dev);
4218         unsigned int queue_index = get_netdev_rx_queue_index(queue);
4219         unsigned int headroom = virtnet_get_headroom(vi);
4220         unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
4221         struct ewma_pkt_len *avg;
4222
4223         BUG_ON(queue_index >= vi->max_queue_pairs);
4224         avg = &vi->rq[queue_index].mrg_avg_pkt_len;
4225         return sprintf(buf, "%u\n",
4226                        get_mergeable_buf_len(&vi->rq[queue_index], avg,
4227                                        SKB_DATA_ALIGN(headroom + tailroom)));
4228 }
4229
4230 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
4231         __ATTR_RO(mergeable_rx_buffer_size);
4232
4233 static struct attribute *virtio_net_mrg_rx_attrs[] = {
4234         &mergeable_rx_buffer_size_attribute.attr,
4235         NULL
4236 };
4237
4238 static const struct attribute_group virtio_net_mrg_rx_group = {
4239         .name = "virtio_net",
4240         .attrs = virtio_net_mrg_rx_attrs
4241 };
4242 #endif
4243
4244 static bool virtnet_fail_on_feature(struct virtio_device *vdev,
4245                                     unsigned int fbit,
4246                                     const char *fname, const char *dname)
4247 {
4248         if (!virtio_has_feature(vdev, fbit))
4249                 return false;
4250
4251         dev_err(&vdev->dev, "device advertises feature %s but not %s",
4252                 fname, dname);
4253
4254         return true;
4255 }
4256
4257 #define VIRTNET_FAIL_ON(vdev, fbit, dbit)                       \
4258         virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
4259
4260 static bool virtnet_validate_features(struct virtio_device *vdev)
4261 {
4262         if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
4263             (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
4264                              "VIRTIO_NET_F_CTRL_VQ") ||
4265              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
4266                              "VIRTIO_NET_F_CTRL_VQ") ||
4267              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
4268                              "VIRTIO_NET_F_CTRL_VQ") ||
4269              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
4270              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
4271                              "VIRTIO_NET_F_CTRL_VQ") ||
4272              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
4273                              "VIRTIO_NET_F_CTRL_VQ") ||
4274              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
4275                              "VIRTIO_NET_F_CTRL_VQ") ||
4276              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
4277                              "VIRTIO_NET_F_CTRL_VQ") ||
4278              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL,
4279                              "VIRTIO_NET_F_CTRL_VQ"))) {
4280                 return false;
4281         }
4282
4283         return true;
4284 }
4285
4286 #define MIN_MTU ETH_MIN_MTU
4287 #define MAX_MTU ETH_MAX_MTU
4288
4289 static int virtnet_validate(struct virtio_device *vdev)
4290 {
4291         if (!vdev->config->get) {
4292                 dev_err(&vdev->dev, "%s failure: config access disabled\n",
4293                         __func__);
4294                 return -EINVAL;
4295         }
4296
4297         if (!virtnet_validate_features(vdev))
4298                 return -EINVAL;
4299
4300         if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
4301                 int mtu = virtio_cread16(vdev,
4302                                          offsetof(struct virtio_net_config,
4303                                                   mtu));
4304                 if (mtu < MIN_MTU)
4305                         __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
4306         }
4307
4308         if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
4309             !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
4310                 dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
4311                 __virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
4312         }
4313
4314         return 0;
4315 }
4316
4317 static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
4318 {
4319         return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
4320                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
4321                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
4322                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
4323                 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
4324                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
4325 }
4326
4327 static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
4328 {
4329         bool guest_gso = virtnet_check_guest_gso(vi);
4330
4331         /* If the device can receive ANY guest GSO packets, regardless of mtu,
4332          * allocate buffers large enough for maximum-size packets; otherwise
4333          * limit them to mtu-sized packets only.
4334          */
4335         if (mtu > ETH_DATA_LEN || guest_gso) {
4336                 vi->big_packets = true;
4337                 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
4338         }
4339 }
4340
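/* Probe-time feature wiring (a sketch of the code below): VIRTIO_NET_F_CSUM
 * enables NETIF_F_HW_CSUM and NETIF_F_SG, the host GSO/TSO/ECN/USO bits
 * enable the corresponding TSO flags and NETIF_F_GSO_UDP_L4, and the guest
 * csum and TSO bits enable NETIF_F_RXCSUM and NETIF_F_GRO_HW.
 */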
4341 static int virtnet_probe(struct virtio_device *vdev)
4342 {
4343         int i, err = -ENOMEM;
4344         struct net_device *dev;
4345         struct virtnet_info *vi;
4346         u16 max_queue_pairs;
4347         int mtu = 0;
4348
4349         /* Find out whether the host supports a multiqueue/RSS virtio_net device */
4350         max_queue_pairs = 1;
4351         if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
4352                 max_queue_pairs =
4353                      virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
4354
4355         /* We need at least 2 queues */
4356         if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
4357             max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
4358             !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4359                 max_queue_pairs = 1;
4360
4361         /* Allocate ourselves a network device with room for our info */
4362         dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
4363         if (!dev)
4364                 return -ENOMEM;
4365
4366         /* Set up network device as normal. */
4367         dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
4368                            IFF_TX_SKB_NO_LINEAR;
4369         dev->netdev_ops = &virtnet_netdev;
4370         dev->features = NETIF_F_HIGHDMA;
4371
4372         dev->ethtool_ops = &virtnet_ethtool_ops;
4373         SET_NETDEV_DEV(dev, &vdev->dev);
4374
4375         /* Do we support "hardware" checksums? */
4376         if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
4377                 /* This opens up the world of extra features. */
4378                 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4379                 if (csum)
4380                         dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4381
4382                 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
4383                         dev->hw_features |= NETIF_F_TSO
4384                                 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
4385                 }
4386                 /* Individual feature bits: what can host handle? */
4387                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
4388                         dev->hw_features |= NETIF_F_TSO;
4389                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
4390                         dev->hw_features |= NETIF_F_TSO6;
4391                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
4392                         dev->hw_features |= NETIF_F_TSO_ECN;
4393                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
4394                         dev->hw_features |= NETIF_F_GSO_UDP_L4;
4395
4396                 dev->features |= NETIF_F_GSO_ROBUST;
4397
4398                 if (gso)
4399                         dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
4400                 /* (!csum && gso) case will be fixed by register_netdev() */
4401         }
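             /* Note the split above: hw_features is what the user may toggle
              * via ethtool, while features is what is actually enabled now.
              */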
4402         if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
4403                 dev->features |= NETIF_F_RXCSUM;
4404         if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
4405             virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
4406                 dev->features |= NETIF_F_GRO_HW;
4407         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
4408                 dev->hw_features |= NETIF_F_GRO_HW;
4409
4410         dev->vlan_features = dev->features;
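             /* NETDEV_XDP_ACT_BASIC covers the core verdicts (PASS/TX/DROP/
              * ABORTED); NETDEV_XDP_ACT_RX_SG is added further down once
              * mergeable RX buffers are known to be available.
              */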
4411         dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
4412
4413         /* MTU range: 68 - 65535 */
4414         dev->min_mtu = MIN_MTU;
4415         dev->max_mtu = MAX_MTU;
4416
4417         /* Configuration may specify what MAC to use.  Otherwise random. */
4418         if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
4419                 u8 addr[ETH_ALEN];
4420
4421                 virtio_cread_bytes(vdev,
4422                                    offsetof(struct virtio_net_config, mac),
4423                                    addr, ETH_ALEN);
4424                 eth_hw_addr_set(dev, addr);
4425         } else {
4426                 eth_hw_addr_random(dev);
4427                 dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
4428                          dev->dev_addr);
4429         }
4430
4431         /* Set up our device-specific information */
4432         vi = netdev_priv(dev);
4433         vi->dev = dev;
4434         vi->vdev = vdev;
4435         vdev->priv = vi;
4436
4437         INIT_WORK(&vi->config_work, virtnet_config_changed_work);
4438         spin_lock_init(&vi->refill_lock);
4439
4440         if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
4441                 vi->mergeable_rx_bufs = true;
4442                 dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
4443         }
4444
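             /* Interrupt coalescing starts fully disabled; user space can
              * raise these limits later via ethtool -C.
              */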
4445         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
4446                 vi->intr_coal_rx.max_usecs = 0;
4447                 vi->intr_coal_tx.max_usecs = 0;
4448                 vi->intr_coal_tx.max_packets = 0;
4449                 vi->intr_coal_rx.max_packets = 0;
4450         }
4451
4452         if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
4453                 vi->has_rss_hash_report = true;
4454
4455         if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
4456                 vi->has_rss = true;
4457
4458         if (vi->has_rss || vi->has_rss_hash_report) {
4459                 vi->rss_indir_table_size =
4460                         virtio_cread16(vdev, offsetof(struct virtio_net_config,
4461                                 rss_max_indirection_table_length));
4462                 vi->rss_key_size =
4463                         virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
4464
4465                 vi->rss_hash_types_supported =
4466                     virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
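                     /* Hashing over IPv6 extension headers (the *_EX types)
                      * is not supported, so never advertise it.
                      */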
4467                 vi->rss_hash_types_supported &=
4468                                 ~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
4469                                   VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
4470                                   VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
4471
4472                 dev->hw_features |= NETIF_F_RXHASH;
4473         }
4474
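             /* Pick the RX header layout: the hash-report header is the
              * largest, the v1/mergeable header carries num_buffers, and
              * legacy devices use the bare virtio_net_hdr.
              */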
4475         if (vi->has_rss_hash_report)
4476                 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
4477         else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
4478                  virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4479                 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
4480         else
4481                 vi->hdr_len = sizeof(struct virtio_net_hdr);
4482
4483         if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
4484             virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4485                 vi->any_header_sg = true;
4486
4487         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4488                 vi->has_cvq = true;
4489
4490         if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
4491                 mtu = virtio_cread16(vdev,
4492                                      offsetof(struct virtio_net_config,
4493                                               mtu));
4494                 if (mtu < dev->min_mtu) {
4495                         /* Should never trigger: MTU was previously validated
4496                          * in virtnet_validate.
4497                          */
4498                         dev_err(&vdev->dev,
4499                                 "device MTU appears to have changed, it is now %d < %d",
4500                                 mtu, dev->min_mtu);
4501                         err = -EINVAL;
4502                         goto free;
4503                 }
4504
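                     /* The device-supplied MTU becomes both the initial MTU
                      * and the ceiling: the admin may lower it but never
                      * raise it.
                      */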
4505                 dev->mtu = mtu;
4506                 dev->max_mtu = mtu;
4507         }
4508
4509         virtnet_set_big_packets(vi, mtu);
4510
4511         if (vi->any_header_sg)
4512                 dev->needed_headroom = vi->hdr_len;
4513
4514         /* Enable multiqueue by default: one queue pair per online CPU, capped by the device */
4515         if (num_online_cpus() >= max_queue_pairs)
4516                 vi->curr_queue_pairs = max_queue_pairs;
4517         else
4518                 vi->curr_queue_pairs = num_online_cpus();
4519         vi->max_queue_pairs = max_queue_pairs;
4520
4521         /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
4522         err = init_vqs(vi);
4523         if (err)
4524                 goto free;
4525
4526 #ifdef CONFIG_SYSFS
4527         if (vi->mergeable_rx_bufs)
4528                 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
4529 #endif
4530         netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
4531         netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
4532
4533         virtnet_init_settings(dev);
4534
4535         if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
4536                 vi->failover = net_failover_create(vi->dev);
4537                 if (IS_ERR(vi->failover)) {
4538                         err = PTR_ERR(vi->failover);
4539                         goto free_vqs;
4540                 }
4541         }
4542
4543         if (vi->has_rss || vi->has_rss_hash_report)
4544                 virtnet_init_default_rss(vi);
4545
4546         /* serialize netdev register + virtio_device_ready() with ndo_open() */
4547         rtnl_lock();
4548
4549         err = register_netdevice(dev);
4550         if (err) {
4551                 pr_debug("virtio_net: registering device failed\n");
4552                 rtnl_unlock();
4553                 goto free_failover;
4554         }
4555
4556         virtio_device_ready(vdev);
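             /* From this point on the device is live: it may use the
              * virtqueues and raise config-change interrupts.
              */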
4557
4558         _virtnet_set_queues(vi, vi->curr_queue_pairs);
4559
4560         /* A random MAC address has been assigned; notify the device.
4561          * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is absent
4562          * because many devices work fine without the MAC being set explicitly.
4563          */
4564         if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
4565             virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
4566                 struct scatterlist sg;
4567
4568                 sg_init_one(&sg, dev->dev_addr, dev->addr_len);
4569                 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
4570                                           VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
4571                         pr_debug("virtio_net: setting MAC address failed\n");
4572                         rtnl_unlock();
4573                         err = -EINVAL;
4574                         goto free_unregister_netdev;
4575                 }
4576         }
4577
4578         rtnl_unlock();
4579
4580         err = virtnet_cpu_notif_add(vi);
4581         if (err) {
4582                 pr_debug("virtio_net: registering cpu notifier failed\n");
4583                 goto free_unregister_netdev;
4584         }
4585
4586         /* Assume link up if the device can't report link status;
4587          * otherwise get the link status from the config. */
4588         netif_carrier_off(dev);
4589         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
4590                 schedule_work(&vi->config_work);
4591         } else {
4592                 vi->status = VIRTIO_NET_S_LINK_UP;
4593                 virtnet_update_settings(vi);
4594                 netif_carrier_on(dev);
4595         }
4596
4597         for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
4598                 if (virtio_has_feature(vi->vdev, guest_offloads[i]))
4599                         set_bit(guest_offloads[i], &vi->guest_offloads);
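             /* Snapshot every offload the device supports so that later
              * feature toggles (e.g. GRO_HW) can restore bits from the
              * capable set.
              */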
4600         vi->guest_offloads_capable = vi->guest_offloads;
4601
4602         pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
4603                  dev->name, max_queue_pairs);
4604
4605         return 0;
4606
4607 free_unregister_netdev:
4608         unregister_netdev(dev);
4609 free_failover:
4610         net_failover_destroy(vi->failover);
4611 free_vqs:
4612         virtio_reset_device(vdev);
4613         cancel_delayed_work_sync(&vi->refill);
4614         free_receive_page_frags(vi);
4615         virtnet_del_vqs(vi);
4616 free:
4617         free_netdev(dev);
4618         return err;
4619 }
4620
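     /* Common virtqueue teardown, shared by remove, freeze and the restore
      * error path: reset the device first so no buffers are in flight while
      * the queues are freed.
      */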
4621 static void remove_vq_common(struct virtnet_info *vi)
4622 {
4623         virtio_reset_device(vi->vdev);
4624
4625         /* Free unused buffers in both send and recv, if any. */
4626         free_unused_bufs(vi);
4627
4628         free_receive_bufs(vi);
4629
4630         free_receive_page_frags(vi);
4631
4632         virtnet_del_vqs(vi);
4633 }
4634
4635 static void virtnet_remove(struct virtio_device *vdev)
4636 {
4637         struct virtnet_info *vi = vdev->priv;
4638
4639         virtnet_cpu_notif_remove(vi);
4640
4641         /* Make sure no work handler is accessing the device. */
4642         flush_work(&vi->config_work);
4643
4644         unregister_netdev(vi->dev);
4645
4646         net_failover_destroy(vi->failover);
4647
4648         remove_vq_common(vi);
4649
4650         free_netdev(vi->dev);
4651 }
4652
4653 static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
4654 {
4655         struct virtnet_info *vi = vdev->priv;
4656
4657         virtnet_cpu_notif_remove(vi);
4658         virtnet_freeze_down(vdev);
4659         remove_vq_common(vi);
4660
4661         return 0;
4662 }
4663
4664 static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
4665 {
4666         struct virtnet_info *vi = vdev->priv;
4667         int err;
4668
4669         err = virtnet_restore_up(vdev);
4670         if (err)
4671                 return err;
4672         virtnet_set_queues(vi, vi->curr_queue_pairs);
4673
4674         err = virtnet_cpu_notif_add(vi);
4675         if (err) {
4676                 virtnet_freeze_down(vdev);
4677                 remove_vq_common(vi);
4678                 return err;
4679         }
4680
4681         return 0;
4682 }
4683
4684 static struct virtio_device_id id_table[] = {
4685         { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
4686         { 0 },
4687 };
4688
4689 #define VIRTNET_FEATURES \
4690         VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
4691         VIRTIO_NET_F_MAC, \
4692         VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
4693         VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
4694         VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
4695         VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
4696         VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
4697         VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
4698         VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
4699         VIRTIO_NET_F_CTRL_MAC_ADDR, \
4700         VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
4701         VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
4702         VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
4703         VIRTIO_NET_F_VQ_NOTF_COAL, \
4704         VIRTIO_NET_F_GUEST_HDRLEN
4705
4706 static unsigned int features[] = {
4707         VIRTNET_FEATURES,
4708 };
4709
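     /* Legacy (pre-virtio-1.0) devices are additionally offered
      * VIRTIO_NET_F_GSO and VIRTIO_F_ANY_LAYOUT; for modern devices the
      * former is obsolete and the latter is implied by VIRTIO_F_VERSION_1.
      */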
4710 static unsigned int features_legacy[] = {
4711         VIRTNET_FEATURES,
4712         VIRTIO_NET_F_GSO,
4713         VIRTIO_F_ANY_LAYOUT,
4714 };
4715
4716 static struct virtio_driver virtio_net_driver = {
4717         .feature_table = features,
4718         .feature_table_size = ARRAY_SIZE(features),
4719         .feature_table_legacy = features_legacy,
4720         .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
4721         .driver.name =  KBUILD_MODNAME,
4722         .driver.owner = THIS_MODULE,
4723         .id_table =     id_table,
4724         .validate =     virtnet_validate,
4725         .probe =        virtnet_probe,
4726         .remove =       virtnet_remove,
4727         .config_changed = virtnet_config_changed,
4728 #ifdef CONFIG_PM_SLEEP
4729         .freeze =       virtnet_freeze,
4730         .restore =      virtnet_restore,
4731 #endif
4732 };
4733
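     /* Register the CPU hotplug callbacks before the driver itself so queue
      * affinities can track CPUs as they come and go; the error labels
      * unwind in reverse order.
      */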
4734 static __init int virtio_net_driver_init(void)
4735 {
4736         int ret;
4737
4738         ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
4739                                       virtnet_cpu_online,
4740                                       virtnet_cpu_down_prep);
4741         if (ret < 0)
4742                 goto out;
4743         virtionet_online = ret;
4744         ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
4745                                       NULL, virtnet_cpu_dead);
4746         if (ret)
4747                 goto err_dead;
4748         ret = register_virtio_driver(&virtio_net_driver);
4749         if (ret)
4750                 goto err_virtio;
4751         return 0;
4752 err_virtio:
4753         cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
4754 err_dead:
4755         cpuhp_remove_multi_state(virtionet_online);
4756 out:
4757         return ret;
4758 }
4759 module_init(virtio_net_driver_init);
4760
4761 static __exit void virtio_net_driver_exit(void)
4762 {
4763         unregister_virtio_driver(&virtio_net_driver);
4764         cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
4765         cpuhp_remove_multi_state(virtionet_online);
4766 }
4767 module_exit(virtio_net_driver_exit);
4768
4769 MODULE_DEVICE_TABLE(virtio, id_table);
4770 MODULE_DESCRIPTION("Virtio network driver");
4771 MODULE_LICENSE("GPL");