// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
#include <net/netdev_rx_queue.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN   128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX           BIT(0)
#define VIRTIO_XDP_REDIR        BIT(1)

#define VIRTIO_XDP_FLAG BIT(0)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)
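
/*
 * Usage sketch (the real call site is ewma_pkt_len_add() in
 * receive_mergeable() below); DECLARE_EWMA() generates these helpers:
 *
 *	struct ewma_pkt_len avg;
 *
 *	ewma_pkt_len_init(&avg);
 *	ewma_pkt_len_add(&avg, skb->len);
 *	avg_len = ewma_pkt_len_read(&avg);
 *
 * With a weight reciprocal of 64, each new sample moves the average by
 * roughly 1/64th of the difference, smoothing transient bursts.
 */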

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
        VIRTIO_NET_F_GUEST_TSO4,
        VIRTIO_NET_F_GUEST_TSO6,
        VIRTIO_NET_F_GUEST_ECN,
        VIRTIO_NET_F_GUEST_UFO,
        VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GUEST_USO4,
        VIRTIO_NET_F_GUEST_USO6,
        VIRTIO_NET_F_GUEST_HDRLEN
};

#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
                                (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
                                (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
                                (1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
                                (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
                                (1ULL << VIRTIO_NET_F_GUEST_USO6))

struct virtnet_stat_desc {
        char desc[ETH_GSTRING_LEN];
        size_t offset;
};

struct virtnet_sq_stats {
        struct u64_stats_sync syncp;
        u64 packets;
        u64 bytes;
        u64 xdp_tx;
        u64 xdp_tx_drops;
        u64 kicks;
        u64 tx_timeouts;
};

struct virtnet_rq_stats {
        struct u64_stats_sync syncp;
        u64 packets;
        u64 bytes;
        u64 drops;
        u64 xdp_packets;
        u64 xdp_tx;
        u64 xdp_redirects;
        u64 xdp_drops;
        u64 kicks;
};

#define VIRTNET_SQ_STAT(m)      offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)      offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
        { "packets",            VIRTNET_SQ_STAT(packets) },
        { "bytes",              VIRTNET_SQ_STAT(bytes) },
        { "xdp_tx",             VIRTNET_SQ_STAT(xdp_tx) },
        { "xdp_tx_drops",       VIRTNET_SQ_STAT(xdp_tx_drops) },
        { "kicks",              VIRTNET_SQ_STAT(kicks) },
        { "tx_timeouts",        VIRTNET_SQ_STAT(tx_timeouts) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
        { "packets",            VIRTNET_RQ_STAT(packets) },
        { "bytes",              VIRTNET_RQ_STAT(bytes) },
        { "drops",              VIRTNET_RQ_STAT(drops) },
        { "xdp_packets",        VIRTNET_RQ_STAT(xdp_packets) },
        { "xdp_tx",             VIRTNET_RQ_STAT(xdp_tx) },
        { "xdp_redirects",      VIRTNET_RQ_STAT(xdp_redirects) },
        { "xdp_drops",          VIRTNET_RQ_STAT(xdp_drops) },
        { "kicks",              VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN    ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN    ARRAY_SIZE(virtnet_rq_stats_desc)
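
/*
 * Sketch of how these tables are meant to be consumed (the real reader
 * is the ethtool stats path, which is outside this hunk; base and data
 * are illustrative names): each named counter is fetched from the
 * per-queue stats struct by its recorded offset.
 *
 *	const u8 *base = (const u8 *)&sq->stats;
 *	unsigned int i;
 *
 *	for (i = 0; i < VIRTNET_SQ_STATS_LEN; i++)
 *		data[i] = *(const u64 *)(base +
 *					 virtnet_sq_stats_desc[i].offset);
 */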

/* Internal representation of a send virtqueue */
struct send_queue {
        /* Virtqueue associated with this send queue */
        struct virtqueue *vq;

        /* TX: fragments + linear part + virtio header */
        struct scatterlist sg[MAX_SKB_FRAGS + 2];

        /* Name of the send queue: output.$index */
        char name[16];

        struct virtnet_sq_stats stats;

        struct napi_struct napi;

        /* Record whether sq is in reset state. */
        bool reset;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
        /* Virtqueue associated with this receive_queue */
        struct virtqueue *vq;

        struct napi_struct napi;

        struct bpf_prog __rcu *xdp_prog;

        struct virtnet_rq_stats stats;

        /* Chain pages by the private ptr. */
        struct page *pages;

        /* Average packet length for mergeable receive buffers. */
        struct ewma_pkt_len mrg_avg_pkt_len;

        /* Page frag for packet buffer allocation. */
        struct page_frag alloc_frag;

        /* RX: fragments + linear part + virtio header */
        struct scatterlist sg[MAX_SKB_FRAGS + 2];

        /* Min single buffer size for mergeable buffers case. */
        unsigned int min_buf_len;

        /* Name of this receive queue: input.$index */
        char name[16];

        struct xdp_rxq_info xdp_rxq;
};

/* This structure holds an RSS message with the maximum settings for the
 * indirection table and key size.
 * Note that the default structure describing the RSS configuration,
 * virtio_net_rss_config, carries the same information but cannot hold the
 * table values.
 * In any case, the structure is passed to the virtio hw through sg_buf,
 * split into parts, because the table sizes may differ according to the
 * device configuration.
 */
#define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
#define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
struct virtio_net_ctrl_rss {
        u32 hash_types;
        u16 indirection_table_mask;
        u16 unclassified_queue;
        u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
        u16 max_tx_vq;
        u8 hash_key_length;
        u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
};
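
/*
 * Hedged sketch of the sg split mentioned above (the actual command
 * building lives in the control-vq path, outside this hunk): only the
 * live prefix of the table and key is handed to the device.
 *
 *	sg_set_buf(&sgs[0], &rss,
 *		   offsetof(struct virtio_net_ctrl_rss, indirection_table));
 *	sg_set_buf(&sgs[1], rss.indirection_table,
 *		   sizeof(u16) * (rss.indirection_table_mask + 1));
 *	sg_set_buf(&sgs[2], &rss.max_tx_vq,
 *		   offsetof(struct virtio_net_ctrl_rss, key) -
 *		   offsetof(struct virtio_net_ctrl_rss, max_tx_vq));
 *	sg_set_buf(&sgs[3], rss.key, rss.hash_key_length);
 */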

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
        struct virtio_net_ctrl_hdr hdr;
        virtio_net_ctrl_ack status;
        struct virtio_net_ctrl_mq mq;
        u8 promisc;
        u8 allmulti;
        __virtio16 vid;
        __virtio64 offloads;
        struct virtio_net_ctrl_rss rss;
        struct virtio_net_ctrl_coal_tx coal_tx;
        struct virtio_net_ctrl_coal_rx coal_rx;
};

struct virtnet_info {
        struct virtio_device *vdev;
        struct virtqueue *cvq;
        struct net_device *dev;
        struct send_queue *sq;
        struct receive_queue *rq;
        unsigned int status;

        /* Max # of queue pairs supported by the device */
        u16 max_queue_pairs;

        /* # of queue pairs currently used by the driver */
        u16 curr_queue_pairs;

        /* # of XDP queue pairs currently used by the driver */
        u16 xdp_queue_pairs;

        /* xdp_queue_pairs may be 0 even when XDP is loaded, so track XDP state separately. */
        bool xdp_enabled;

        /* I like... big packets and I cannot lie! */
        bool big_packets;

        /* number of sg entries allocated for big packets */
        unsigned int big_packets_num_skbfrags;

        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;

        /* Host supports rss and/or hash report */
        bool has_rss;
        bool has_rss_hash_report;
        u8 rss_key_size;
        u16 rss_indir_table_size;
        u32 rss_hash_types_supported;
        u32 rss_hash_types_saved;

        /* Has control virtqueue */
        bool has_cvq;

        /* Host can handle any s/g split between our header and packet data */
        bool any_header_sg;

        /* Packet virtio header size */
        u8 hdr_len;

        /* Work struct for delayed refilling if we run low on memory. */
        struct delayed_work refill;

        /* Is delayed refill enabled? */
        bool refill_enabled;

        /* The lock to synchronize the access to refill_enabled */
        spinlock_t refill_lock;

        /* Work struct for config space updates */
        struct work_struct config_work;

        /* Is the affinity hint set for virtqueues? */
        bool affinity_hint_set;

        /* CPU hotplug instances for online & dead */
        struct hlist_node node;
        struct hlist_node node_dead;

        struct control_buf *ctrl;

        /* Ethtool settings */
        u8 duplex;
        u32 speed;

        /* Interrupt coalescing settings */
        u32 tx_usecs;
        u32 rx_usecs;
        u32 tx_max_packets;
        u32 rx_max_packets;

        unsigned long guest_offloads;
        unsigned long guest_offloads_capable;

        /* failover when STANDBY feature enabled */
        struct failover *failover;
};

struct padded_vnet_hdr {
        struct virtio_net_hdr_v1_hash hdr;
        /*
         * hdr is in a separate sg buffer, and data sg buffer shares same page
         * with this header sg. This padding makes next sg 16 byte aligned
         * after the header.
         */
        char padding[12];
};
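
/*
 * Arithmetic note (assuming the usual layout of struct
 * virtio_net_hdr_v1_hash): the header is 20 bytes, so the 12 bytes of
 * padding place the data sg at offset 32, a 16-byte aligned boundary.
 */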

static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);

static bool is_xdp_frame(void *ptr)
{
        return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
        return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
        return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
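
/*
 * Illustrative round trip (not driver logic): virtqueue tokens are at
 * least pointer-aligned, so bit 0 is free to tag XDP frames, while
 * sk_buff pointers are stored untagged.
 *
 *	void *tok = xdp_to_ptr(xdpf);	// sets bit 0
 *	is_xdp_frame(tok);		// true
 *	ptr_to_xdp(tok);		// masks bit 0, yields xdpf again
 *	is_xdp_frame(skb);		// false for a plain sk_buff
 */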

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
        return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
        return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
        return vq->index / 2;
}

static int rxq2vq(int rxq)
{
        return rxq * 2;
}
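
/*
 * Worked example for two queue pairs: vq0 = rx0, vq1 = tx0, vq2 = rx1,
 * vq3 = tx1, vq4 = cvq. Hence vq2rxq() on vq2 yields 1, txq2vq(1)
 * yields 3, and vq2txq() on vq3 yields 1 again.
 */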

static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
        return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the front for reuse.
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
        struct page *end;

        /* Find end of list, sew whole thing into vi->rq.pages. */
        for (end = page; end->private; end = (struct page *)end->private);
        end->private = (unsigned long)rq->pages;
        rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
        struct page *p = rq->pages;

        if (p) {
                rq->pages = (struct page *)p->private;
                /* clear private here, it is used to chain pages */
                p->private = 0;
        } else
                p = alloc_page(gfp_mask);
        return p;
}

static void enable_delayed_refill(struct virtnet_info *vi)
{
        spin_lock_bh(&vi->refill_lock);
        vi->refill_enabled = true;
        spin_unlock_bh(&vi->refill_lock);
}

static void disable_delayed_refill(struct virtnet_info *vi)
{
        spin_lock_bh(&vi->refill_lock);
        vi->refill_enabled = false;
        spin_unlock_bh(&vi->refill_lock);
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
                                    struct virtqueue *vq)
{
        if (napi_schedule_prep(napi)) {
                virtqueue_disable_cb(vq);
                __napi_schedule(napi);
        }
}

static void virtqueue_napi_complete(struct napi_struct *napi,
                                    struct virtqueue *vq, int processed)
{
        int opaque;

        opaque = virtqueue_enable_cb_prepare(vq);
        if (napi_complete_done(napi, processed)) {
                if (unlikely(virtqueue_poll(vq, opaque)))
                        virtqueue_napi_schedule(napi, vq);
        } else {
                virtqueue_disable_cb(vq);
        }
}

static void skb_xmit_done(struct virtqueue *vq)
{
        struct virtnet_info *vi = vq->vdev->priv;
        struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

        /* Suppress further interrupts. */
        virtqueue_disable_cb(vq);

        if (napi->weight)
                virtqueue_napi_schedule(napi, vq);
        else
                /* We were probably waiting for more output buffers. */
                netif_wake_subqueue(vi->dev, vq2txq(vq));
}

#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
                                  unsigned int headroom)
{
        return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
        return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
        return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
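
/*
 * Round-trip sketch: the per-buffer context pointer packs the headroom
 * into the bits above MRG_CTX_HEADER_SHIFT and the truesize below it.
 *
 *	void *ctx = mergeable_len_to_ctx(1536, 256);
 *
 *	mergeable_ctx_to_truesize(ctx);	// 1536
 *	mergeable_ctx_to_headroom(ctx);	// 256
 */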

static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
                                         unsigned int headroom,
                                         unsigned int len)
{
        struct sk_buff *skb;

        skb = build_skb(buf, buflen);
        if (unlikely(!skb))
                return NULL;

        skb_reserve(skb, headroom);
        skb_put(skb, len);

        return skb;
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
                                   struct receive_queue *rq,
                                   struct page *page, unsigned int offset,
                                   unsigned int len, unsigned int truesize,
                                   unsigned int headroom)
{
        struct sk_buff *skb;
        struct virtio_net_hdr_mrg_rxbuf *hdr;
        unsigned int copy, hdr_len, hdr_padded_len;
        struct page *page_to_free = NULL;
        int tailroom, shinfo_size;
        char *p, *hdr_p, *buf;

        p = page_address(page) + offset;
        hdr_p = p;

        hdr_len = vi->hdr_len;
        if (vi->mergeable_rx_bufs)
                hdr_padded_len = hdr_len;
        else
                hdr_padded_len = sizeof(struct padded_vnet_hdr);

        buf = p - headroom;
        len -= hdr_len;
        offset += hdr_padded_len;
        p += hdr_padded_len;
        tailroom = truesize - headroom - hdr_padded_len - len;

        shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        /* Large packet with tailroom for shinfo: build the skb around the buffer, no copy */
        if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
                skb = virtnet_build_skb(buf, truesize, p - buf, len);
                if (unlikely(!skb))
                        return NULL;

                page = (struct page *)page->private;
                if (page)
                        give_pages(rq, page);
                goto ok;
        }

        /* copy small packet so we can reuse these pages for small data */
        skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
        if (unlikely(!skb))
                return NULL;

        /* Copy the whole frame if it fits in skb->head; otherwise
         * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
         */
        if (len <= skb_tailroom(skb))
                copy = len;
        else
                copy = ETH_HLEN;
        skb_put_data(skb, p, copy);

        len -= copy;
        offset += copy;

        if (vi->mergeable_rx_bufs) {
                if (len)
                        skb_add_rx_frag(skb, 0, page, offset, len, truesize);
                else
                        page_to_free = page;
                goto ok;
        }

        /*
         * Verify that we can indeed put this data into a skb.
         * This is here to handle cases when the device erroneously
         * tries to receive more than is possible. This is usually
         * the case of a broken device.
         */
        if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
                net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
                dev_kfree_skb(skb);
                return NULL;
        }
        BUG_ON(offset >= PAGE_SIZE);
        while (len) {
                unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
                                frag_size, truesize);
                len -= frag_size;
                page = (struct page *)page->private;
                offset = 0;
        }

        if (page)
                give_pages(rq, page);

ok:
        hdr = skb_vnet_hdr(skb);
        memcpy(hdr, hdr_p, hdr_len);
        if (page_to_free)
                put_page(page_to_free);

        return skb;
}

static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
        unsigned int len;
        unsigned int packets = 0;
        unsigned int bytes = 0;
        void *ptr;

        while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
                if (likely(!is_xdp_frame(ptr))) {
                        struct sk_buff *skb = ptr;

                        pr_debug("Sent skb %p\n", skb);

                        bytes += skb->len;
                        napi_consume_skb(skb, in_napi);
                } else {
                        struct xdp_frame *frame = ptr_to_xdp(ptr);

                        bytes += xdp_get_frame_len(frame);
                        xdp_return_frame(frame);
                }
                packets++;
        }

        /* Avoid the overhead of a stats update when no packets have been
         * processed; this happens when called speculatively from start_xmit.
         */
        if (!packets)
                return;

        u64_stats_update_begin(&sq->stats.syncp);
        sq->stats.bytes += bytes;
        sq->stats.packets += packets;
        u64_stats_update_end(&sq->stats.syncp);
}

static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
        if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
                return false;
        else if (q < vi->curr_queue_pairs)
                return true;
        else
                return false;
}

static void check_sq_full_and_disable(struct virtnet_info *vi,
                                      struct net_device *dev,
                                      struct send_queue *sq)
{
        bool use_napi = sq->napi.weight;
        int qnum;

        qnum = sq - vi->sq;

        /* If running out of space, stop queue to avoid getting packets that we
         * are then unable to transmit.
         * An alternative would be to force queuing layer to requeue the skb by
         * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
         * returned in a normal path of operation: it means that driver is not
         * maintaining the TX queue stop/start state properly, and causes
         * the stack to do a non-trivial amount of useless work.
         * Since most packets only take 1 or 2 ring slots, stopping the queue
         * early means 16 slots are typically wasted.
         */
        if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
                netif_stop_subqueue(dev, qnum);
                if (use_napi) {
                        if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
                                virtqueue_napi_schedule(&sq->napi, sq->vq);
                } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
                        /* More just got used, free them then recheck. */
                        free_old_xmit_skbs(sq, false);
                        if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
                                netif_start_subqueue(dev, qnum);
                                virtqueue_disable_cb(sq->vq);
                        }
                }
        }
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
                                   struct send_queue *sq,
                                   struct xdp_frame *xdpf)
{
        struct virtio_net_hdr_mrg_rxbuf *hdr;
        struct skb_shared_info *shinfo;
        u8 nr_frags = 0;
        int err, i;

        if (unlikely(xdpf->headroom < vi->hdr_len))
                return -EOVERFLOW;

        if (unlikely(xdp_frame_has_frags(xdpf))) {
                shinfo = xdp_get_shared_info_from_frame(xdpf);
                nr_frags = shinfo->nr_frags;
        }

        /* In the wrapping function virtnet_xdp_xmit(), we need to free
         * up pending old buffers, and computing the position of
         * skb_shared_info in xdp_get_frame_len() and xdp_return_frame()
         * depends on xdpf->data and xdpf->headroom. Therefore, update
         * the headroom here, in sync with the data pointer.
         */
        xdpf->headroom -= vi->hdr_len;
        xdpf->data -= vi->hdr_len;
        /* Zero header and leave csum up to XDP layers */
        hdr = xdpf->data;
        memset(hdr, 0, vi->hdr_len);
        xdpf->len   += vi->hdr_len;

        sg_init_table(sq->sg, nr_frags + 1);
        sg_set_buf(sq->sg, xdpf->data, xdpf->len);
        for (i = 0; i < nr_frags; i++) {
                skb_frag_t *frag = &shinfo->frags[i];

                sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
                            skb_frag_size(frag), skb_frag_off(frag));
        }

        err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
                                   xdp_to_ptr(xdpf), GFP_ATOMIC);
        if (unlikely(err))
                return -ENOSPC; /* Caller handles free/refcnt */

        return 0;
}

/* When vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
 * the current cpu, so it does not need to be locked.
 *
 * Here we use a macro instead of inline functions because we have to deal with
 * three issues at the same time: 1. the choice of sq; 2. conditionally taking
 * and releasing the txq lock; 3. making sparse happy. It is difficult for two
 * inline functions to solve all three problems at the same time.
 */
#define virtnet_xdp_get_sq(vi) ({                                       \
        int cpu = smp_processor_id();                                   \
        struct netdev_queue *txq;                                       \
        typeof(vi) v = (vi);                                            \
        unsigned int qp;                                                \
                                                                        \
        if (v->curr_queue_pairs > nr_cpu_ids) {                         \
                qp = v->curr_queue_pairs - v->xdp_queue_pairs;          \
                qp += cpu;                                              \
                txq = netdev_get_tx_queue(v->dev, qp);                  \
                __netif_tx_acquire(txq);                                \
        } else {                                                        \
                qp = cpu % v->curr_queue_pairs;                         \
                txq = netdev_get_tx_queue(v->dev, qp);                  \
                __netif_tx_lock(txq, cpu);                              \
        }                                                               \
        v->sq + qp;                                                     \
})

#define virtnet_xdp_put_sq(vi, q) {                                     \
        struct netdev_queue *txq;                                       \
        typeof(vi) v = (vi);                                            \
                                                                        \
        txq = netdev_get_tx_queue(v->dev, (q) - v->sq);                 \
        if (v->curr_queue_pairs > nr_cpu_ids)                           \
                __netif_tx_release(txq);                                \
        else                                                            \
                __netif_tx_unlock(txq);                                 \
}
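
/*
 * The two macros must be used as a bracketing pair, as
 * virtnet_xdp_xmit() below does:
 *
 *	sq = virtnet_xdp_get_sq(vi);
 *	... queue frames on sq->vq ...
 *	virtnet_xdp_put_sq(vi, sq);
 *
 * so that the txq lock conditionally taken in get is always released.
 */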

static int virtnet_xdp_xmit(struct net_device *dev,
                            int n, struct xdp_frame **frames, u32 flags)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct receive_queue *rq = vi->rq;
        struct bpf_prog *xdp_prog;
        struct send_queue *sq;
        unsigned int len;
        int packets = 0;
        int bytes = 0;
        int nxmit = 0;
        int kicks = 0;
        void *ptr;
        int ret;
        int i;

        /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
         * indicates XDP resources have been successfully allocated.
         */
        xdp_prog = rcu_access_pointer(rq->xdp_prog);
        if (!xdp_prog)
                return -ENXIO;

        sq = virtnet_xdp_get_sq(vi);

        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
                ret = -EINVAL;
                goto out;
        }

        /* Free up any pending old buffers before queueing new ones. */
        while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
                if (likely(is_xdp_frame(ptr))) {
                        struct xdp_frame *frame = ptr_to_xdp(ptr);

                        bytes += xdp_get_frame_len(frame);
                        xdp_return_frame(frame);
                } else {
                        struct sk_buff *skb = ptr;

                        bytes += skb->len;
                        napi_consume_skb(skb, false);
                }
                packets++;
        }

        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];

                if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
                        break;
                nxmit++;
        }
        ret = nxmit;

        if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
                check_sq_full_and_disable(vi, dev, sq);

        if (flags & XDP_XMIT_FLUSH) {
                if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
                        kicks = 1;
        }
out:
        u64_stats_update_begin(&sq->stats.syncp);
        sq->stats.bytes += bytes;
        sq->stats.packets += packets;
        sq->stats.xdp_tx += n;
        sq->stats.xdp_tx_drops += n - nxmit;
        sq->stats.kicks += kicks;
        u64_stats_update_end(&sq->stats.syncp);

        virtnet_xdp_put_sq(vi, sq);
        return ret;
}

static void put_xdp_frags(struct xdp_buff *xdp)
{
        struct skb_shared_info *shinfo;
        struct page *xdp_page;
        int i;

        if (xdp_buff_has_frags(xdp)) {
                shinfo = xdp_get_shared_info_from_buff(xdp);
                for (i = 0; i < shinfo->nr_frags; i++) {
                        xdp_page = skb_frag_page(&shinfo->frags[i]);
                        put_page(xdp_page);
                }
        }
}

static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
                               struct net_device *dev,
                               unsigned int *xdp_xmit,
                               struct virtnet_rq_stats *stats)
{
        struct xdp_frame *xdpf;
        int err;
        u32 act;

        act = bpf_prog_run_xdp(xdp_prog, xdp);
        stats->xdp_packets++;

        switch (act) {
        case XDP_PASS:
                return act;

        case XDP_TX:
                stats->xdp_tx++;
                xdpf = xdp_convert_buff_to_frame(xdp);
                if (unlikely(!xdpf)) {
                        netdev_dbg(dev, "convert buff to frame failed for xdp\n");
                        return XDP_DROP;
                }

                err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
                if (unlikely(!err)) {
                        xdp_return_frame_rx_napi(xdpf);
                } else if (unlikely(err < 0)) {
                        trace_xdp_exception(dev, xdp_prog, act);
                        return XDP_DROP;
                }
                *xdp_xmit |= VIRTIO_XDP_TX;
                return act;

        case XDP_REDIRECT:
                stats->xdp_redirects++;
                err = xdp_do_redirect(dev, xdp, xdp_prog);
                if (err)
                        return XDP_DROP;

                *xdp_xmit |= VIRTIO_XDP_REDIR;
                return act;

        default:
                bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(dev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
                return XDP_DROP;
        }
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
        return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that we hit right
 * after XDP is enabled and until the queue is refilled with large
 * buffers with sufficient headroom - so it should affect at most
 * queue size packets.
 * Afterwards, the conditions to enable XDP should preclude the
 * underlying device from sending packets across multiple buffers
 * (num_buf > 1), and we make sure buffers have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
                                       int *num_buf,
                                       struct page *p,
                                       int offset,
                                       int page_off,
                                       unsigned int *len)
{
        int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        struct page *page;

        if (page_off + *len + tailroom > PAGE_SIZE)
                return NULL;

        page = alloc_page(GFP_ATOMIC);
        if (!page)
                return NULL;

        memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
        page_off += *len;

        while (--*num_buf) {
                unsigned int buflen;
                void *buf;
                int off;

                buf = virtqueue_get_buf(rq->vq, &buflen);
                if (unlikely(!buf))
                        goto err_buf;

                p = virt_to_head_page(buf);
                off = buf - page_address(p);

                /* guard against a misconfigured or uncooperative backend that
                 * is sending packets larger than the MTU.
                 */
                if ((page_off + buflen + tailroom) > PAGE_SIZE) {
                        put_page(p);
                        goto err_buf;
                }

                memcpy(page_address(page) + page_off,
                       page_address(p) + off, buflen);
                page_off += buflen;
                put_page(p);
        }

        /* Headroom does not contribute to packet length */
        *len = page_off - VIRTIO_XDP_HEADROOM;
        return page;
err_buf:
        __free_pages(page, 0);
        return NULL;
}

static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
                                               unsigned int xdp_headroom,
                                               void *buf,
                                               unsigned int len)
{
        unsigned int header_offset;
        unsigned int headroom;
        unsigned int buflen;
        struct sk_buff *skb;

        header_offset = VIRTNET_RX_PAD + xdp_headroom;
        headroom = vi->hdr_len + header_offset;
        buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        skb = virtnet_build_skb(buf, buflen, headroom, len);
        if (unlikely(!skb))
                return NULL;

        buf += header_offset;
        memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);

        return skb;
}

static struct sk_buff *receive_small_xdp(struct net_device *dev,
                                         struct virtnet_info *vi,
                                         struct receive_queue *rq,
                                         struct bpf_prog *xdp_prog,
                                         void *buf,
                                         unsigned int xdp_headroom,
                                         unsigned int len,
                                         unsigned int *xdp_xmit,
                                         struct virtnet_rq_stats *stats)
{
        unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
        unsigned int headroom = vi->hdr_len + header_offset;
        struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
        struct page *page = virt_to_head_page(buf);
        struct page *xdp_page;
        unsigned int buflen;
        struct xdp_buff xdp;
        struct sk_buff *skb;
        unsigned int metasize = 0;
        u32 act;

        if (unlikely(hdr->hdr.gso_type))
                goto err_xdp;

        buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
                int offset = buf - page_address(page) + header_offset;
                unsigned int tlen = len + vi->hdr_len;
                int num_buf = 1;

                xdp_headroom = virtnet_get_headroom(vi);
                header_offset = VIRTNET_RX_PAD + xdp_headroom;
                headroom = vi->hdr_len + header_offset;
                buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                xdp_page = xdp_linearize_page(rq, &num_buf, page,
                                              offset, header_offset,
                                              &tlen);
                if (!xdp_page)
                        goto err_xdp;

                buf = page_address(xdp_page);
                put_page(page);
                page = xdp_page;
        }

        xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
        xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
                         xdp_headroom, len, true);

        act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);

        switch (act) {
        case XDP_PASS:
                /* Recalculate length in case bpf program changed it */
                len = xdp.data_end - xdp.data;
                metasize = xdp.data - xdp.data_meta;
                break;

        case XDP_TX:
        case XDP_REDIRECT:
                goto xdp_xmit;

        default:
                goto err_xdp;
        }

        skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
        if (unlikely(!skb))
                goto err;

        if (metasize)
                skb_metadata_set(skb, metasize);

        return skb;

err_xdp:
        stats->xdp_drops++;
err:
        stats->drops++;
        put_page(page);
xdp_xmit:
        return NULL;
}

static struct sk_buff *receive_small(struct net_device *dev,
                                     struct virtnet_info *vi,
                                     struct receive_queue *rq,
                                     void *buf, void *ctx,
                                     unsigned int len,
                                     unsigned int *xdp_xmit,
                                     struct virtnet_rq_stats *stats)
{
        unsigned int xdp_headroom = (unsigned long)ctx;
        struct page *page = virt_to_head_page(buf);
        struct sk_buff *skb;

        len -= vi->hdr_len;
        stats->bytes += len;

        if (unlikely(len > GOOD_PACKET_LEN)) {
                pr_debug("%s: rx error: len %u exceeds max size %d\n",
                         dev->name, len, GOOD_PACKET_LEN);
                dev->stats.rx_length_errors++;
                goto err;
        }

        if (unlikely(vi->xdp_enabled)) {
                struct bpf_prog *xdp_prog;

                rcu_read_lock();
                xdp_prog = rcu_dereference(rq->xdp_prog);
                if (xdp_prog) {
                        skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
                                                xdp_headroom, len, xdp_xmit,
                                                stats);
                        rcu_read_unlock();
                        return skb;
                }
                rcu_read_unlock();
        }

        skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
        if (likely(skb))
                return skb;

err:
        stats->drops++;
        put_page(page);
        return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
                                   struct virtnet_info *vi,
                                   struct receive_queue *rq,
                                   void *buf,
                                   unsigned int len,
                                   struct virtnet_rq_stats *stats)
{
        struct page *page = buf;
        struct sk_buff *skb =
                page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);

        stats->bytes += len - vi->hdr_len;
        if (unlikely(!skb))
                goto err;

        return skb;

err:
        stats->drops++;
        give_pages(rq, page);
        return NULL;
}

static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
                               struct net_device *dev,
                               struct virtnet_rq_stats *stats)
{
        struct page *page;
        void *buf;
        int len;

        while (num_buf-- > 1) {
                buf = virtqueue_get_buf(rq->vq, &len);
                if (unlikely(!buf)) {
                        pr_debug("%s: rx error: %d buffers missing\n",
                                 dev->name, num_buf);
                        dev->stats.rx_length_errors++;
                        break;
                }
                stats->bytes += len;
                page = virt_to_head_page(buf);
                put_page(page);
        }
}

/* Why not use xdp_build_skb_from_frame()?
 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
 * virtio-net there are 2 points that do not match its requirements:
 *  1. The size of the prefilled buffer is not fixed before xdp is set.
 *  2. xdp_build_skb_from_frame() does more checks that we don't need,
 *     like eth_type_trans() (which virtio-net does in receive_buf()).
 */
static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
                                               struct virtnet_info *vi,
                                               struct xdp_buff *xdp,
                                               unsigned int xdp_frags_truesz)
{
        struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
        unsigned int headroom, data_len;
        struct sk_buff *skb;
        int metasize;
        u8 nr_frags;

        if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
                pr_debug("Error building skb as missing reserved tailroom for xdp");
                return NULL;
        }

        if (unlikely(xdp_buff_has_frags(xdp)))
                nr_frags = sinfo->nr_frags;

        skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
        if (unlikely(!skb))
                return NULL;

        headroom = xdp->data - xdp->data_hard_start;
        data_len = xdp->data_end - xdp->data;
        skb_reserve(skb, headroom);
        __skb_put(skb, data_len);

        metasize = xdp->data - xdp->data_meta;
        metasize = metasize > 0 ? metasize : 0;
        if (metasize)
                skb_metadata_set(skb, metasize);

        if (unlikely(xdp_buff_has_frags(xdp)))
                xdp_update_skb_shared_info(skb, nr_frags,
                                           sinfo->xdp_frags_size,
                                           xdp_frags_truesz,
                                           xdp_buff_is_frag_pfmemalloc(xdp));

        return skb;
}

/* TODO: build xdp in big mode */
static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
                                      struct virtnet_info *vi,
                                      struct receive_queue *rq,
                                      struct xdp_buff *xdp,
                                      void *buf,
                                      unsigned int len,
                                      unsigned int frame_sz,
                                      int *num_buf,
                                      unsigned int *xdp_frags_truesize,
                                      struct virtnet_rq_stats *stats)
{
        struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
        unsigned int headroom, tailroom, room;
        unsigned int truesize, cur_frag_size;
        struct skb_shared_info *shinfo;
        unsigned int xdp_frags_truesz = 0;
        struct page *page;
        skb_frag_t *frag;
        int offset;
        void *ctx;

        xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
        xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
                         VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);

        if (!*num_buf)
                return 0;

        if (*num_buf > 1) {
                /* If we want to build multi-buffer xdp, we need
                 * to specify that the flags of xdp_buff have the
                 * XDP_FLAGS_HAS_FRAG bit.
                 */
                if (!xdp_buff_has_frags(xdp))
                        xdp_buff_set_frags_flag(xdp);

                shinfo = xdp_get_shared_info_from_buff(xdp);
                shinfo->nr_frags = 0;
                shinfo->xdp_frags_size = 0;
        }

        if (*num_buf > MAX_SKB_FRAGS + 1)
                return -EINVAL;

        while (--*num_buf > 0) {
                buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
                if (unlikely(!buf)) {
                        pr_debug("%s: rx error: %d buffers out of %d missing\n",
                                 dev->name, *num_buf,
                                 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
                        dev->stats.rx_length_errors++;
                        goto err;
                }

                stats->bytes += len;
                page = virt_to_head_page(buf);
                offset = buf - page_address(page);

                truesize = mergeable_ctx_to_truesize(ctx);
                headroom = mergeable_ctx_to_headroom(ctx);
                tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
                room = SKB_DATA_ALIGN(headroom + tailroom);

                cur_frag_size = truesize;
                xdp_frags_truesz += cur_frag_size;
                if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
                        put_page(page);
                        pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
                                 dev->name, len, (unsigned long)(truesize - room));
                        dev->stats.rx_length_errors++;
                        goto err;
                }

                frag = &shinfo->frags[shinfo->nr_frags++];
                skb_frag_fill_page_desc(frag, page, offset, len);
                if (page_is_pfmemalloc(page))
                        xdp_buff_set_frag_pfmemalloc(xdp);

                shinfo->xdp_frags_size += len;
        }

        *xdp_frags_truesize = xdp_frags_truesz;
        return 0;

err:
        put_xdp_frags(xdp);
        return -EINVAL;
}

static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
                                   struct receive_queue *rq,
                                   struct bpf_prog *xdp_prog,
                                   void *ctx,
                                   unsigned int *frame_sz,
                                   int *num_buf,
                                   struct page **page,
                                   int offset,
                                   unsigned int *len,
                                   struct virtio_net_hdr_mrg_rxbuf *hdr)
{
        unsigned int truesize = mergeable_ctx_to_truesize(ctx);
        unsigned int headroom = mergeable_ctx_to_headroom(ctx);
        struct page *xdp_page;
        unsigned int xdp_room;

        /* Transient failure which in theory could occur if
         * in-flight packets from before XDP was enabled reach
         * the receive path after XDP is loaded.
         */
        if (unlikely(hdr->hdr.gso_type))
                return NULL;

        /* The XDP core assumes the frag size is PAGE_SIZE, but buffers
         * with headroom may add a hole to truesize, which
         * makes their length exceed PAGE_SIZE. So we disable the
         * hole mechanism for xdp. See add_recvbuf_mergeable().
         */
        *frame_sz = truesize;

        if (likely(headroom >= virtnet_get_headroom(vi) &&
                   (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
                return page_address(*page) + offset;
        }

        /* This happens when headroom is not enough because
         * the buffer was prefilled before XDP was set.
         * This should only happen for the first several packets.
         * In fact, vq reset could be used here to help us clean up
         * the prefilled buffers, but many existing devices do not
         * support it, and we don't want to bother users who are
         * using xdp normally.
         */
1336         if (!xdp_prog->aux->xdp_has_frags) {
1337                 /* linearize data for XDP */
1338                 xdp_page = xdp_linearize_page(rq, num_buf,
1339                                               *page, offset,
1340                                               VIRTIO_XDP_HEADROOM,
1341                                               len);
1342                 if (!xdp_page)
1343                         return NULL;
1344         } else {
1345                 xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
1346                                           sizeof(struct skb_shared_info));
1347                 if (*len + xdp_room > PAGE_SIZE)
1348                         return NULL;
1349
1350                 xdp_page = alloc_page(GFP_ATOMIC);
1351                 if (!xdp_page)
1352                         return NULL;
1353
1354                 memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
1355                        page_address(*page) + offset, *len);
1356         }
1357
1358         *frame_sz = PAGE_SIZE;
1359
1360         put_page(*page);
1361
1362         *page = xdp_page;
1363
1364         return page_address(*page) + VIRTIO_XDP_HEADROOM;
1365 }
1366
1367 static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
1368                                              struct virtnet_info *vi,
1369                                              struct receive_queue *rq,
1370                                              struct bpf_prog *xdp_prog,
1371                                              void *buf,
1372                                              void *ctx,
1373                                              unsigned int len,
1374                                              unsigned int *xdp_xmit,
1375                                              struct virtnet_rq_stats *stats)
1376 {
1377         struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1378         int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1379         struct page *page = virt_to_head_page(buf);
1380         int offset = buf - page_address(page);
1381         unsigned int xdp_frags_truesz = 0;
1382         struct sk_buff *head_skb;
1383         unsigned int frame_sz;
1384         struct xdp_buff xdp;
1385         void *data;
1386         u32 act;
1387         int err;
1388
1389         data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
1390                                      offset, &len, hdr);
1391         if (unlikely(!data))
1392                 goto err_xdp;
1393
1394         err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
1395                                          &num_buf, &xdp_frags_truesz, stats);
1396         if (unlikely(err))
1397                 goto err_xdp;
1398
1399         act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1400
1401         switch (act) {
1402         case XDP_PASS:
1403                 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
1404                 if (unlikely(!head_skb))
1405                         break;
1406                 return head_skb;
1407
1408         case XDP_TX:
1409         case XDP_REDIRECT:
1410                 return NULL;
1411
1412         default:
1413                 break;
1414         }
1415
1416         put_xdp_frags(&xdp);
1417
1418 err_xdp:
1419         put_page(page);
1420         mergeable_buf_free(rq, num_buf, dev, stats);
1421
1422         stats->xdp_drops++;
1423         stats->drops++;
1424         return NULL;
1425 }
1426
1427 static struct sk_buff *receive_mergeable(struct net_device *dev,
1428                                          struct virtnet_info *vi,
1429                                          struct receive_queue *rq,
1430                                          void *buf,
1431                                          void *ctx,
1432                                          unsigned int len,
1433                                          unsigned int *xdp_xmit,
1434                                          struct virtnet_rq_stats *stats)
1435 {
1436         struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1437         int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1438         struct page *page = virt_to_head_page(buf);
1439         int offset = buf - page_address(page);
1440         struct sk_buff *head_skb, *curr_skb;
1441         unsigned int truesize = mergeable_ctx_to_truesize(ctx);
1442         unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1443         unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1444         unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1445
1446         head_skb = NULL;
1447         stats->bytes += len - vi->hdr_len;
1448
1449         if (unlikely(len > truesize - room)) {
1450                 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1451                          dev->name, len, (unsigned long)(truesize - room));
1452                 dev->stats.rx_length_errors++;
1453                 goto err_skb;
1454         }
1455
1456         if (unlikely(vi->xdp_enabled)) {
1457                 struct bpf_prog *xdp_prog;
1458
1459                 rcu_read_lock();
1460                 xdp_prog = rcu_dereference(rq->xdp_prog);
1461                 if (xdp_prog) {
1462                         head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
1463                                                          len, xdp_xmit, stats);
1464                         rcu_read_unlock();
1465                         return head_skb;
1466                 }
1467                 rcu_read_unlock();
1468         }
1469
1470         head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
1471         curr_skb = head_skb;
1472
1473         if (unlikely(!curr_skb))
1474                 goto err_skb;
1475         while (--num_buf) {
1476                 int num_skb_frags;
1477
1478                 buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
1479                 if (unlikely(!buf)) {
1480                         pr_debug("%s: rx error: %d buffers out of %d missing\n",
1481                                  dev->name, num_buf,
1482                                  virtio16_to_cpu(vi->vdev,
1483                                                  hdr->num_buffers));
1484                         dev->stats.rx_length_errors++;
1485                         goto err_buf;
1486                 }
1487
1488                 stats->bytes += len;
1489                 page = virt_to_head_page(buf);
1490
1491                 truesize = mergeable_ctx_to_truesize(ctx);
1492                 headroom = mergeable_ctx_to_headroom(ctx);
1493                 tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1494                 room = SKB_DATA_ALIGN(headroom + tailroom);
1495                 if (unlikely(len > truesize - room)) {
1496                         pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1497                                  dev->name, len, (unsigned long)(truesize - room));
1498                         dev->stats.rx_length_errors++;
1499                         goto err_skb;
1500                 }
1501
1502                 num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
1503                 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
1504                         struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
1505
1506                         if (unlikely(!nskb))
1507                                 goto err_skb;
1508                         if (curr_skb == head_skb)
1509                                 skb_shinfo(curr_skb)->frag_list = nskb;
1510                         else
1511                                 curr_skb->next = nskb;
1512                         curr_skb = nskb;
1513                         head_skb->truesize += nskb->truesize;
1514                         num_skb_frags = 0;
1515                 }
1516                 if (curr_skb != head_skb) {
1517                         head_skb->data_len += len;
1518                         head_skb->len += len;
1519                         head_skb->truesize += truesize;
1520                 }
1521                 offset = buf - page_address(page);
1522                 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
1523                         put_page(page);
1524                         skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
1525                                              len, truesize);
1526                 } else {
1527                         skb_add_rx_frag(curr_skb, num_skb_frags, page,
1528                                         offset, len, truesize);
1529                 }
1530         }
1531
1532         ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
1533         return head_skb;
1534
1535 err_skb:
1536         put_page(page);
1537         mergeable_buf_free(rq, num_buf, dev, stats);
1538
1539 err_buf:
1540         stats->drops++;
1541         dev_kfree_skb(head_skb);
1542         return NULL;
1543 }
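
/* Illustrative note, not driver code: when a merged packet needs more page
 * fragments than one skb can carry (MAX_SKB_FRAGS), the loop above chains
 * extra zero-length skbs (alloc_skb(0, ...)) off the head and keeps
 * appending frags to the current tail, roughly:
 *
 *   head_skb [frags full]
 *     `-frag_list-> nskb1 [frags full] -next-> nskb2 [remaining frags]
 *
 * head_skb->len, ->data_len and ->truesize are updated along the way so the
 * stack still sees a single logical packet.
 */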
1544
1545 static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
1546                                 struct sk_buff *skb)
1547 {
1548         enum pkt_hash_types rss_hash_type;
1549
1550         if (!hdr_hash || !skb)
1551                 return;
1552
1553         switch (__le16_to_cpu(hdr_hash->hash_report)) {
1554         case VIRTIO_NET_HASH_REPORT_TCPv4:
1555         case VIRTIO_NET_HASH_REPORT_UDPv4:
1556         case VIRTIO_NET_HASH_REPORT_TCPv6:
1557         case VIRTIO_NET_HASH_REPORT_UDPv6:
1558         case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
1559         case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
1560                 rss_hash_type = PKT_HASH_TYPE_L4;
1561                 break;
1562         case VIRTIO_NET_HASH_REPORT_IPv4:
1563         case VIRTIO_NET_HASH_REPORT_IPv6:
1564         case VIRTIO_NET_HASH_REPORT_IPv6_EX:
1565                 rss_hash_type = PKT_HASH_TYPE_L3;
1566                 break;
1567         case VIRTIO_NET_HASH_REPORT_NONE:
1568         default:
1569                 rss_hash_type = PKT_HASH_TYPE_NONE;
1570         }
1571         skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
1572 }
1573
1574 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
1575                         void *buf, unsigned int len, void **ctx,
1576                         unsigned int *xdp_xmit,
1577                         struct virtnet_rq_stats *stats)
1578 {
1579         struct net_device *dev = vi->dev;
1580         struct sk_buff *skb;
1581         struct virtio_net_hdr_mrg_rxbuf *hdr;
1582
1583         if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
1584                 pr_debug("%s: short packet %i\n", dev->name, len);
1585                 dev->stats.rx_length_errors++;
1586                 virtnet_rq_free_unused_buf(rq->vq, buf);
1587                 return;
1588         }
1589
1590         if (vi->mergeable_rx_bufs)
1591                 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
1592                                         stats);
1593         else if (vi->big_packets)
1594                 skb = receive_big(dev, vi, rq, buf, len, stats);
1595         else
1596                 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
1597
1598         if (unlikely(!skb))
1599                 return;
1600
1601         hdr = skb_vnet_hdr(skb);
1602         if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
1603                 virtio_skb_set_hash((const struct virtio_net_hdr_v1_hash *)hdr, skb);
1604
1605         if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
1606                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1607
1608         if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
1609                                   virtio_is_little_endian(vi->vdev))) {
1610                 net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
1611                                      dev->name, hdr->hdr.gso_type,
1612                                      hdr->hdr.gso_size);
1613                 goto frame_err;
1614         }
1615
1616         skb_record_rx_queue(skb, vq2rxq(rq->vq));
1617         skb->protocol = eth_type_trans(skb, dev);
1618         pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
1619                  ntohs(skb->protocol), skb->len, skb->pkt_type);
1620
1621         napi_gro_receive(&rq->napi, skb);
1622         return;
1623
1624 frame_err:
1625         dev->stats.rx_frame_errors++;
1626         dev_kfree_skb(skb);
1627 }
1628
1629 /* Unlike mergeable buffers, all buffers are allocated with the
1630  * same size, except for the headroom. For this reason we do
1631  * not need to use mergeable_len_to_ctx here - it is enough
1632  * to store the headroom as the context, ignoring the truesize.
1633  */
1634 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
1635                              gfp_t gfp)
1636 {
1637         struct page_frag *alloc_frag = &rq->alloc_frag;
1638         char *buf;
1639         unsigned int xdp_headroom = virtnet_get_headroom(vi);
1640         void *ctx = (void *)(unsigned long)xdp_headroom;
1641         int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
1642         int err;
1643
1644         len = SKB_DATA_ALIGN(len) +
1645               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1646         if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
1647                 return -ENOMEM;
1648
1649         buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
1650         get_page(alloc_frag->page);
1651         alloc_frag->offset += len;
1652         sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
1653                     vi->hdr_len + GOOD_PACKET_LEN);
1654         err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1655         if (err < 0)
1656                 put_page(virt_to_head_page(buf));
1657         return err;
1658 }
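
/* Illustrative note, not driver code: the small receive buffer posted above
 * is laid out roughly as
 *
 *   buf
 *   | VIRTNET_RX_PAD | xdp_headroom | hdr | GOOD_PACKET_LEN | shinfo tail |
 *                                   ^
 *                                   sg start (hdr_len + GOOD_PACKET_LEN)
 *
 * Only the hdr+packet region is exposed to the device; the pad keeps the IP
 * header aligned, the (possibly zero) xdp_headroom leaves room for
 * xdp_adjust_head(), and the SKB_DATA_ALIGN'd tail lets the page fragment
 * later be turned into an skb without copying.
 */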
1659
1660 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
1661                            gfp_t gfp)
1662 {
1663         struct page *first, *list = NULL;
1664         char *p;
1665         int i, err, offset;
1666
1667         sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
1668
1669         /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
1670         for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
1671                 first = get_a_page(rq, gfp);
1672                 if (!first) {
1673                         if (list)
1674                                 give_pages(rq, list);
1675                         return -ENOMEM;
1676                 }
1677                 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
1678
1679                 /* chain new page in list head to match sg */
1680                 first->private = (unsigned long)list;
1681                 list = first;
1682         }
1683
1684         first = get_a_page(rq, gfp);
1685         if (!first) {
1686                 give_pages(rq, list);
1687                 return -ENOMEM;
1688         }
1689         p = page_address(first);
1690
1691         /* rq->sg[0], rq->sg[1] share the same page */
1692 /* a separate rq->sg[0] for the header - required in case !any_header_sg */
1693         sg_set_buf(&rq->sg[0], p, vi->hdr_len);
1694
1695         /* rq->sg[1] for data packet, from offset */
1696         offset = sizeof(struct padded_vnet_hdr);
1697         sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
1698
1699         /* chain first in list head */
1700         first->private = (unsigned long)list;
1701         err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
1702                                   first, gfp);
1703         if (err < 0)
1704                 give_pages(rq, first);
1705
1706         return err;
1707 }
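
/* Illustrative note, not driver code: a big-packet buffer is a chain of
 * pages linked through page->private, built tail-first so it matches the
 * scatterlist order, e.g. with two frag pages:
 *
 *   first (sg[0] hdr + sg[1] data) -> page_A (sg[2]) -> page_B (sg[3]) -> 0
 *
 * give_pages() and get_a_page() elsewhere in this file walk and recycle the
 * same ->private chain.
 */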
1708
1709 static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
1710                                           struct ewma_pkt_len *avg_pkt_len,
1711                                           unsigned int room)
1712 {
1713         struct virtnet_info *vi = rq->vq->vdev->priv;
1714         const size_t hdr_len = vi->hdr_len;
1715         unsigned int len;
1716
1717         if (room)
1718                 return PAGE_SIZE - room;
1719
1720         len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
1721                                 rq->min_buf_len, PAGE_SIZE - hdr_len);
1722
1723         return ALIGN(len, L1_CACHE_BYTES);
1724 }
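
/* Worked example with assumed numbers, for illustration only: with
 * hdr_len == 12, min_buf_len == 1536 and an EWMA average of ~1500 bytes,
 * the clamp yields 1536, so len == 12 + 1536 == 1548 and the function
 * returns ALIGN(1548, 64) == 1600 on a 64-byte-cacheline system - much
 * smaller than a page, which is the point of sizing refills from the
 * observed packet-size average.
 */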
1725
1726 static int add_recvbuf_mergeable(struct virtnet_info *vi,
1727                                  struct receive_queue *rq, gfp_t gfp)
1728 {
1729         struct page_frag *alloc_frag = &rq->alloc_frag;
1730         unsigned int headroom = virtnet_get_headroom(vi);
1731         unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1732         unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1733         char *buf;
1734         void *ctx;
1735         int err;
1736         unsigned int len, hole;
1737
1738         /* Extra tailroom is needed to satisfy XDP's assumption. This
1739          * means rx frag coalescing won't work, but considering we've
1740          * disabled GSO for XDP, it won't be a big issue.
1741          */
1742         len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
1743         if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
1744                 return -ENOMEM;
1745
1746         buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
1747         buf += headroom; /* advance address leaving hole at front of pkt */
1748         get_page(alloc_frag->page);
1749         alloc_frag->offset += len + room;
1750         hole = alloc_frag->size - alloc_frag->offset;
1751         if (hole < len + room) {
1752                 /* To avoid internal fragmentation, if there is very likely not
1753                  * enough space for another buffer, add the remaining space to
1754                  * the current buffer.
1755                  * XDP core assumes that frame_size of xdp_buff and the length
1756                  * of the frag are PAGE_SIZE, so we disable the hole mechanism.
1757                  */
1758                 if (!headroom)
1759                         len += hole;
1760                 alloc_frag->offset += hole;
1761         }
1762
1763         sg_init_one(rq->sg, buf, len);
1764         ctx = mergeable_len_to_ctx(len + room, headroom);
1765         err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1766         if (err < 0)
1767                 put_page(virt_to_head_page(buf));
1768
1769         return err;
1770 }
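
/* Worked example of the hole logic above, assumed numbers for illustration
 * only: with a 32768-byte page frag, len + room == 1600 and
 * alloc_frag->offset landing on 32000, hole == 768. No further buffer fits,
 * so without XDP headroom the 768 leftover bytes are folded into this
 * buffer (len += hole) instead of being wasted; with headroom the hole is
 * only skipped, keeping the frag length as the XDP core expects.
 */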
1771
1772 /*
1773  * Returns false if we couldn't fill entirely (OOM).
1774  *
1775  * Normally run in the receive path, but can also be run from ndo_open
1776  * before we're receiving packets, or from refill_work which is
1777  * careful to disable receiving (using napi_disable).
1778  */
1779 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
1780                           gfp_t gfp)
1781 {
1782         int err;
1783         bool oom;
1784
1785         do {
1786                 if (vi->mergeable_rx_bufs)
1787                         err = add_recvbuf_mergeable(vi, rq, gfp);
1788                 else if (vi->big_packets)
1789                         err = add_recvbuf_big(vi, rq, gfp);
1790                 else
1791                         err = add_recvbuf_small(vi, rq, gfp);
1792
1793                 oom = err == -ENOMEM;
1794                 if (err)
1795                         break;
1796         } while (rq->vq->num_free);
1797         if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
1798                 unsigned long flags;
1799
1800                 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
1801                 rq->stats.kicks++;
1802                 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
1803         }
1804
1805         return !oom;
1806 }
1807
1808 static void skb_recv_done(struct virtqueue *rvq)
1809 {
1810         struct virtnet_info *vi = rvq->vdev->priv;
1811         struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
1812
1813         virtqueue_napi_schedule(&rq->napi, rvq);
1814 }
1815
1816 static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
1817 {
1818         napi_enable(napi);
1819
1820         /* If all buffers were filled by the other side before we enabled napi,
1821          * we won't get another interrupt, so process any outstanding packets
1822          * now. Calling local_bh_enable afterwards triggers softIRQ processing.
1823          */
1824         local_bh_disable();
1825         virtqueue_napi_schedule(napi, vq);
1826         local_bh_enable();
1827 }
1828
1829 static void virtnet_napi_tx_enable(struct virtnet_info *vi,
1830                                    struct virtqueue *vq,
1831                                    struct napi_struct *napi)
1832 {
1833         if (!napi->weight)
1834                 return;
1835
1836         /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
1837          * enable the feature if this is likely affine with the transmit path.
1838          */
1839         if (!vi->affinity_hint_set) {
1840                 napi->weight = 0;
1841                 return;
1842         }
1843
1844         return virtnet_napi_enable(vq, napi);
1845 }
1846
1847 static void virtnet_napi_tx_disable(struct napi_struct *napi)
1848 {
1849         if (napi->weight)
1850                 napi_disable(napi);
1851 }
1852
1853 static void refill_work(struct work_struct *work)
1854 {
1855         struct virtnet_info *vi =
1856                 container_of(work, struct virtnet_info, refill.work);
1857         bool still_empty;
1858         int i;
1859
1860         for (i = 0; i < vi->curr_queue_pairs; i++) {
1861                 struct receive_queue *rq = &vi->rq[i];
1862
1863                 napi_disable(&rq->napi);
1864                 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
1865                 virtnet_napi_enable(rq->vq, &rq->napi);
1866
1867                 /* In theory, this can happen: if we don't get any buffers in,
1868                  * we will *never* try to fill again.
1869                  */
1870                 if (still_empty)
1871                         schedule_delayed_work(&vi->refill, HZ/2);
1872         }
1873 }
1874
1875 static int virtnet_receive(struct receive_queue *rq, int budget,
1876                            unsigned int *xdp_xmit)
1877 {
1878         struct virtnet_info *vi = rq->vq->vdev->priv;
1879         struct virtnet_rq_stats stats = {};
1880         unsigned int len;
1881         void *buf;
1882         int i;
1883
1884         if (!vi->big_packets || vi->mergeable_rx_bufs) {
1885                 void *ctx;
1886
1887                 while (stats.packets < budget &&
1888                        (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
1889                         receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
1890                         stats.packets++;
1891                 }
1892         } else {
1893                 while (stats.packets < budget &&
1894                        (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
1895                         receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
1896                         stats.packets++;
1897                 }
1898         }
1899
1900         if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
1901                 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
1902                         spin_lock(&vi->refill_lock);
1903                         if (vi->refill_enabled)
1904                                 schedule_delayed_work(&vi->refill, 0);
1905                         spin_unlock(&vi->refill_lock);
1906                 }
1907         }
1908
1909         u64_stats_update_begin(&rq->stats.syncp);
1910         for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
1911                 size_t offset = virtnet_rq_stats_desc[i].offset;
1912                 u64 *item;
1913
1914                 item = (u64 *)((u8 *)&rq->stats + offset);
1915                 *item += *(u64 *)((u8 *)&stats + offset);
1916         }
1917         u64_stats_update_end(&rq->stats.syncp);
1918
1919         return stats.packets;
1920 }
1921
1922 static void virtnet_poll_cleantx(struct receive_queue *rq)
1923 {
1924         struct virtnet_info *vi = rq->vq->vdev->priv;
1925         unsigned int index = vq2rxq(rq->vq);
1926         struct send_queue *sq = &vi->sq[index];
1927         struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
1928
1929         if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
1930                 return;
1931
1932         if (__netif_tx_trylock(txq)) {
1933                 if (sq->reset) {
1934                         __netif_tx_unlock(txq);
1935                         return;
1936                 }
1937
1938                 do {
1939                         virtqueue_disable_cb(sq->vq);
1940                         free_old_xmit_skbs(sq, true);
1941                 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
1942
1943                 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
1944                         netif_tx_wake_queue(txq);
1945
1946                 __netif_tx_unlock(txq);
1947         }
1948 }
1949
1950 static int virtnet_poll(struct napi_struct *napi, int budget)
1951 {
1952         struct receive_queue *rq =
1953                 container_of(napi, struct receive_queue, napi);
1954         struct virtnet_info *vi = rq->vq->vdev->priv;
1955         struct send_queue *sq;
1956         unsigned int received;
1957         unsigned int xdp_xmit = 0;
1958
1959         virtnet_poll_cleantx(rq);
1960
1961         received = virtnet_receive(rq, budget, &xdp_xmit);
1962
1963         if (xdp_xmit & VIRTIO_XDP_REDIR)
1964                 xdp_do_flush();
1965
1966         /* Out of packets? */
1967         if (received < budget)
1968                 virtqueue_napi_complete(napi, rq->vq, received);
1969
1970         if (xdp_xmit & VIRTIO_XDP_TX) {
1971                 sq = virtnet_xdp_get_sq(vi);
1972                 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
1973                         u64_stats_update_begin(&sq->stats.syncp);
1974                         sq->stats.kicks++;
1975                         u64_stats_update_end(&sq->stats.syncp);
1976                 }
1977                 virtnet_xdp_put_sq(vi, sq);
1978         }
1979
1980         return received;
1981 }
1982
1983 static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
1984 {
1985         virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
1986         napi_disable(&vi->rq[qp_index].napi);
1987         xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
1988 }
1989
1990 static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
1991 {
1992         struct net_device *dev = vi->dev;
1993         int err;
1994
1995         err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
1996                                vi->rq[qp_index].napi.napi_id);
1997         if (err < 0)
1998                 return err;
1999
2000         err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
2001                                          MEM_TYPE_PAGE_SHARED, NULL);
2002         if (err < 0)
2003                 goto err_xdp_reg_mem_model;
2004
2005         virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
2006         virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
2007
2008         return 0;
2009
2010 err_xdp_reg_mem_model:
2011         xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2012         return err;
2013 }
2014
2015 static int virtnet_open(struct net_device *dev)
2016 {
2017         struct virtnet_info *vi = netdev_priv(dev);
2018         int i, err;
2019
2020         enable_delayed_refill(vi);
2021
2022         for (i = 0; i < vi->max_queue_pairs; i++) {
2023                 if (i < vi->curr_queue_pairs)
2024                         /* Make sure we have some buffers: if oom, use the wq. */
2025                         if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2026                                 schedule_delayed_work(&vi->refill, 0);
2027
2028                 err = virtnet_enable_queue_pair(vi, i);
2029                 if (err < 0)
2030                         goto err_enable_qp;
2031         }
2032
2033         return 0;
2034
2035 err_enable_qp:
2036         disable_delayed_refill(vi);
2037         cancel_delayed_work_sync(&vi->refill);
2038
2039         for (i--; i >= 0; i--)
2040                 virtnet_disable_queue_pair(vi, i);
2041         return err;
2042 }
2043
2044 static int virtnet_poll_tx(struct napi_struct *napi, int budget)
2045 {
2046         struct send_queue *sq = container_of(napi, struct send_queue, napi);
2047         struct virtnet_info *vi = sq->vq->vdev->priv;
2048         unsigned int index = vq2txq(sq->vq);
2049         struct netdev_queue *txq;
2050         int opaque;
2051         bool done;
2052
2053         if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
2054                 /* We don't need to enable cb for XDP */
2055                 napi_complete_done(napi, 0);
2056                 return 0;
2057         }
2058
2059         txq = netdev_get_tx_queue(vi->dev, index);
2060         __netif_tx_lock(txq, raw_smp_processor_id());
2061         virtqueue_disable_cb(sq->vq);
2062         free_old_xmit_skbs(sq, true);
2063
2064         if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
2065                 netif_tx_wake_queue(txq);
2066
2067         opaque = virtqueue_enable_cb_prepare(sq->vq);
2068
2069         done = napi_complete_done(napi, 0);
2070
2071         if (!done)
2072                 virtqueue_disable_cb(sq->vq);
2073
2074         __netif_tx_unlock(txq);
2075
2076         if (done) {
2077                 if (unlikely(virtqueue_poll(sq->vq, opaque))) {
2078                         if (napi_schedule_prep(napi)) {
2079                                 __netif_tx_lock(txq, raw_smp_processor_id());
2080                                 virtqueue_disable_cb(sq->vq);
2081                                 __netif_tx_unlock(txq);
2082                                 __napi_schedule(napi);
2083                         }
2084                 }
2085         }
2086
2087         return 0;
2088 }
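
/* Illustrative note, not driver code: the completion dance above is
 * deliberately ordered. virtqueue_enable_cb_prepare() re-arms callbacks and
 * returns an opaque snapshot, napi_complete_done() takes us off the poll
 * list, and virtqueue_poll(vq, opaque) then re-checks for buffers that were
 * used in the window between the two; if any were, the napi is rescheduled
 * with callbacks disabled again, so no tx completion is ever missed.
 */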
2089
2090 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
2091 {
2092         struct virtio_net_hdr_mrg_rxbuf *hdr;
2093         const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
2094         struct virtnet_info *vi = sq->vq->vdev->priv;
2095         int num_sg;
2096         unsigned hdr_len = vi->hdr_len;
2097         bool can_push;
2098
2099         pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
2100
2101         can_push = vi->any_header_sg &&
2102                 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
2103                 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
2104         /* Even if we can, don't push here yet as this would skew
2105          * csum_start offset below. */
2106         if (can_push)
2107                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
2108         else
2109                 hdr = skb_vnet_hdr(skb);
2110
2111         if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
2112                                     virtio_is_little_endian(vi->vdev), false,
2113                                     0))
2114                 return -EPROTO;
2115
2116         if (vi->mergeable_rx_bufs)
2117                 hdr->num_buffers = 0;
2118
2119         sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
2120         if (can_push) {
2121                 __skb_push(skb, hdr_len);
2122                 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
2123                 if (unlikely(num_sg < 0))
2124                         return num_sg;
2125                 /* Pull header back to avoid skew in tx bytes calculations. */
2126                 __skb_pull(skb, hdr_len);
2127         } else {
2128                 sg_set_buf(sq->sg, hdr, hdr_len);
2129                 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
2130                 if (unlikely(num_sg < 0))
2131                         return num_sg;
2132                 num_sg++;
2133         }
2134         return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
2135 }
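
/* Illustrative note, not driver code: the two scatterlist layouts built
 * above are
 *
 *   can_push:  sg[0] = [vnet hdr | linear data], sg[1..] = page frags
 *              (header pushed into the skb's headroom, one entry saved)
 *   otherwise: sg[0] = vnet hdr as a separate buffer,
 *              sg[1] = linear data, sg[2..] = page frags
 *
 * which is why num_sg is incremented by one only in the else branch.
 */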
2136
2137 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
2138 {
2139         struct virtnet_info *vi = netdev_priv(dev);
2140         int qnum = skb_get_queue_mapping(skb);
2141         struct send_queue *sq = &vi->sq[qnum];
2142         int err;
2143         struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
2144         bool kick = !netdev_xmit_more();
2145         bool use_napi = sq->napi.weight;
2146
2147         /* Free up any pending old buffers before queueing new ones. */
2148         do {
2149                 if (use_napi)
2150                         virtqueue_disable_cb(sq->vq);
2151
2152                 free_old_xmit_skbs(sq, false);
2153
2154         } while (use_napi && kick &&
2155                unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2156
2157         /* timestamp packet in software */
2158         skb_tx_timestamp(skb);
2159
2160         /* Try to transmit */
2161         err = xmit_skb(sq, skb);
2162
2163         /* This should not happen! */
2164         if (unlikely(err)) {
2165                 dev->stats.tx_fifo_errors++;
2166                 if (net_ratelimit())
2167                         dev_warn(&dev->dev,
2168                                  "Unexpected TXQ (%d) queue failure: %d\n",
2169                                  qnum, err);
2170                 dev->stats.tx_dropped++;
2171                 dev_kfree_skb_any(skb);
2172                 return NETDEV_TX_OK;
2173         }
2174
2175         /* Don't wait up for transmitted skbs to be freed. */
2176         if (!use_napi) {
2177                 skb_orphan(skb);
2178                 nf_reset_ct(skb);
2179         }
2180
2181         check_sq_full_and_disable(vi, dev, sq);
2182
2183         if (kick || netif_xmit_stopped(txq)) {
2184                 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2185                         u64_stats_update_begin(&sq->stats.syncp);
2186                         sq->stats.kicks++;
2187                         u64_stats_update_end(&sq->stats.syncp);
2188                 }
2189         }
2190
2191         return NETDEV_TX_OK;
2192 }
2193
2194 static int virtnet_rx_resize(struct virtnet_info *vi,
2195                              struct receive_queue *rq, u32 ring_num)
2196 {
2197         bool running = netif_running(vi->dev);
2198         int err, qindex;
2199
2200         qindex = rq - vi->rq;
2201
2202         if (running)
2203                 napi_disable(&rq->napi);
2204
2205         err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
2206         if (err)
2207                 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
2208
2209         if (!try_fill_recv(vi, rq, GFP_KERNEL))
2210                 schedule_delayed_work(&vi->refill, 0);
2211
2212         if (running)
2213                 virtnet_napi_enable(rq->vq, &rq->napi);
2214         return err;
2215 }
2216
2217 static int virtnet_tx_resize(struct virtnet_info *vi,
2218                              struct send_queue *sq, u32 ring_num)
2219 {
2220         bool running = netif_running(vi->dev);
2221         struct netdev_queue *txq;
2222         int err, qindex;
2223
2224         qindex = sq - vi->sq;
2225
2226         if (running)
2227                 virtnet_napi_tx_disable(&sq->napi);
2228
2229         txq = netdev_get_tx_queue(vi->dev, qindex);
2230
2231         /* 1. wait for all xmit to complete
2232          * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
2233          */
2234         __netif_tx_lock_bh(txq);
2235
2236         /* Prevent rx poll from accessing sq. */
2237         sq->reset = true;
2238
2239         /* Prevent the upper layer from trying to send packets. */
2240         netif_stop_subqueue(vi->dev, qindex);
2241
2242         __netif_tx_unlock_bh(txq);
2243
2244         err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
2245         if (err)
2246                 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
2247
2248         __netif_tx_lock_bh(txq);
2249         sq->reset = false;
2250         netif_tx_wake_queue(txq);
2251         __netif_tx_unlock_bh(txq);
2252
2253         if (running)
2254                 virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
2255         return err;
2256 }
2257
2258 /*
2259  * Send command via the control virtqueue and check status.  Commands
2260  * supported by the hypervisor, as indicated by feature bits, should
2261  * never fail unless improperly formatted.
2262  */
2263 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
2264                                  struct scatterlist *out)
2265 {
2266         struct scatterlist *sgs[4], hdr, stat;
2267         unsigned out_num = 0, tmp;
2268         int ret;
2269
2270         /* Caller should know better */
2271         BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
2272
2273         vi->ctrl->status = ~0;
2274         vi->ctrl->hdr.class = class;
2275         vi->ctrl->hdr.cmd = cmd;
2276         /* Add header */
2277         sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
2278         sgs[out_num++] = &hdr;
2279
2280         if (out)
2281                 sgs[out_num++] = out;
2282
2283         /* Add return status. */
2284         sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
2285         sgs[out_num] = &stat;
2286
2287         BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
2288         ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
2289         if (ret < 0) {
2290                 dev_warn(&vi->vdev->dev,
2291                          "Failed to add sgs for command vq: %d.\n", ret);
2292                 return false;
2293         }
2294
2295         if (unlikely(!virtqueue_kick(vi->cvq)))
2296                 return vi->ctrl->status == VIRTIO_NET_OK;
2297
2298         /* Spin for a response; the kick causes an ioport write, trapping
2299          * into the hypervisor, so the request should be handled immediately.
2300          */
2301         while (!virtqueue_get_buf(vi->cvq, &tmp) &&
2302                !virtqueue_is_broken(vi->cvq))
2303                 cpu_relax();
2304
2305         return vi->ctrl->status == VIRTIO_NET_OK;
2306 }
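
/* Usage sketch, illustrative only (real callers follow below): a command is
 * at most four sg entries - sgs[0] the { class, cmd } header, sgs[1] an
 * optional command-specific payload, and one trailing status byte written
 * back by the device. Toggling promiscuous mode, assuming
 * VIRTIO_NET_F_CTRL_RX was negotiated, would look like:
 *
 *   vi->ctrl->promisc = 1;
 *   sg_init_one(&sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
 *   ok = virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 *                             VIRTIO_NET_CTRL_RX_PROMISC, &sg);
 */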
2307
2308 static int virtnet_set_mac_address(struct net_device *dev, void *p)
2309 {
2310         struct virtnet_info *vi = netdev_priv(dev);
2311         struct virtio_device *vdev = vi->vdev;
2312         int ret;
2313         struct sockaddr *addr;
2314         struct scatterlist sg;
2315
2316         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
2317                 return -EOPNOTSUPP;
2318
2319         addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
2320         if (!addr)
2321                 return -ENOMEM;
2322
2323         ret = eth_prepare_mac_addr_change(dev, addr);
2324         if (ret)
2325                 goto out;
2326
2327         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
2328                 sg_init_one(&sg, addr->sa_data, dev->addr_len);
2329                 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2330                                           VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
2331                         dev_warn(&vdev->dev,
2332                                  "Failed to set mac address by vq command.\n");
2333                         ret = -EINVAL;
2334                         goto out;
2335                 }
2336         } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
2337                    !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2338                 unsigned int i;
2339
2340                 /* Naturally, this has an atomicity problem. */
2341                 for (i = 0; i < dev->addr_len; i++)
2342                         virtio_cwrite8(vdev,
2343                                        offsetof(struct virtio_net_config, mac) +
2344                                        i, addr->sa_data[i]);
2345         }
2346
2347         eth_commit_mac_addr_change(dev, p);
2348         ret = 0;
2349
2350 out:
2351         kfree(addr);
2352         return ret;
2353 }
2354
2355 static void virtnet_stats(struct net_device *dev,
2356                           struct rtnl_link_stats64 *tot)
2357 {
2358         struct virtnet_info *vi = netdev_priv(dev);
2359         unsigned int start;
2360         int i;
2361
2362         for (i = 0; i < vi->max_queue_pairs; i++) {
2363                 u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
2364                 struct receive_queue *rq = &vi->rq[i];
2365                 struct send_queue *sq = &vi->sq[i];
2366
2367                 do {
2368                         start = u64_stats_fetch_begin(&sq->stats.syncp);
2369                         tpackets = sq->stats.packets;
2370                         tbytes   = sq->stats.bytes;
2371                         terrors  = sq->stats.tx_timeouts;
2372                 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
2373
2374                 do {
2375                         start = u64_stats_fetch_begin(&rq->stats.syncp);
2376                         rpackets = rq->stats.packets;
2377                         rbytes   = rq->stats.bytes;
2378                         rdrops   = rq->stats.drops;
2379                 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
2380
2381                 tot->rx_packets += rpackets;
2382                 tot->tx_packets += tpackets;
2383                 tot->rx_bytes   += rbytes;
2384                 tot->tx_bytes   += tbytes;
2385                 tot->rx_dropped += rdrops;
2386                 tot->tx_errors  += terrors;
2387         }
2388
2389         tot->tx_dropped = dev->stats.tx_dropped;
2390         tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
2391         tot->rx_length_errors = dev->stats.rx_length_errors;
2392         tot->rx_frame_errors = dev->stats.rx_frame_errors;
2393 }
2394
2395 static void virtnet_ack_link_announce(struct virtnet_info *vi)
2396 {
2397         rtnl_lock();
2398         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
2399                                   VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
2400                 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
2401         rtnl_unlock();
2402 }
2403
2404 static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2405 {
2406         struct scatterlist sg;
2407         struct net_device *dev = vi->dev;
2408
2409         if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
2410                 return 0;
2411
2412         vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
2413         sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
2414
2415         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
2416                                   VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
2417                 dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
2418                          queue_pairs);
2419                 return -EINVAL;
2420         } else {
2421                 vi->curr_queue_pairs = queue_pairs;
2422                 /* virtnet_open() will refill when the device is brought up. */
2423                 if (dev->flags & IFF_UP)
2424                         schedule_delayed_work(&vi->refill, 0);
2425         }
2426
2427         return 0;
2428 }
2429
2430 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2431 {
2432         int err;
2433
2434         rtnl_lock();
2435         err = _virtnet_set_queues(vi, queue_pairs);
2436         rtnl_unlock();
2437         return err;
2438 }
2439
2440 static int virtnet_close(struct net_device *dev)
2441 {
2442         struct virtnet_info *vi = netdev_priv(dev);
2443         int i;
2444
2445         /* Make sure NAPI doesn't schedule refill work */
2446         disable_delayed_refill(vi);
2447         /* Make sure refill_work doesn't re-enable napi! */
2448         cancel_delayed_work_sync(&vi->refill);
2449
2450         for (i = 0; i < vi->max_queue_pairs; i++)
2451                 virtnet_disable_queue_pair(vi, i);
2452
2453         return 0;
2454 }
2455
2456 static void virtnet_set_rx_mode(struct net_device *dev)
2457 {
2458         struct virtnet_info *vi = netdev_priv(dev);
2459         struct scatterlist sg[2];
2460         struct virtio_net_ctrl_mac *mac_data;
2461         struct netdev_hw_addr *ha;
2462         int uc_count;
2463         int mc_count;
2464         void *buf;
2465         int i;
2466
2467         /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
2468         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
2469                 return;
2470
2471         vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
2472         vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
2473
2474         sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
2475
2476         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2477                                   VIRTIO_NET_CTRL_RX_PROMISC, sg))
2478                 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
2479                          vi->ctrl->promisc ? "en" : "dis");
2480
2481         sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
2482
2483         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2484                                   VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
2485                 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
2486                          vi->ctrl->allmulti ? "en" : "dis");
2487
2488         uc_count = netdev_uc_count(dev);
2489         mc_count = netdev_mc_count(dev);
2490         /* MAC filter - use one buffer for both lists */
2491         buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
2492                       (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
2493         mac_data = buf;
2494         if (!buf)
2495                 return;
2496
2497         sg_init_table(sg, 2);
2498
2499         /* Store the unicast list and count in the front of the buffer */
2500         mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
2501         i = 0;
2502         netdev_for_each_uc_addr(ha, dev)
2503                 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2504
2505         sg_set_buf(&sg[0], mac_data,
2506                    sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
2507
2508         /* multicast list and count fill the end */
2509         mac_data = (void *)&mac_data->macs[uc_count][0];
2510
2511         mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
2512         i = 0;
2513         netdev_for_each_mc_addr(ha, dev)
2514                 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2515
2516         sg_set_buf(&sg[1], mac_data,
2517                    sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
2518
2519         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2520                                   VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
2521                 dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
2522
2523         kfree(buf);
2524 }
2525
2526 static int virtnet_vlan_rx_add_vid(struct net_device *dev,
2527                                    __be16 proto, u16 vid)
2528 {
2529         struct virtnet_info *vi = netdev_priv(dev);
2530         struct scatterlist sg;
2531
2532         vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2533         sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2534
2535         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2536                                   VIRTIO_NET_CTRL_VLAN_ADD, &sg))
2537                 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
2538         return 0;
2539 }
2540
2541 static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
2542                                     __be16 proto, u16 vid)
2543 {
2544         struct virtnet_info *vi = netdev_priv(dev);
2545         struct scatterlist sg;
2546
2547         vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2548         sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2549
2550         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2551                                   VIRTIO_NET_CTRL_VLAN_DEL, &sg))
2552                 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
2553         return 0;
2554 }
2555
2556 static void virtnet_clean_affinity(struct virtnet_info *vi)
2557 {
2558         int i;
2559
2560         if (vi->affinity_hint_set) {
2561                 for (i = 0; i < vi->max_queue_pairs; i++) {
2562                         virtqueue_set_affinity(vi->rq[i].vq, NULL);
2563                         virtqueue_set_affinity(vi->sq[i].vq, NULL);
2564                 }
2565
2566                 vi->affinity_hint_set = false;
2567         }
2568 }
2569
2570 static void virtnet_set_affinity(struct virtnet_info *vi)
2571 {
2572         cpumask_var_t mask;
2573         int stragglers;
2574         int group_size;
2575         int i, j, cpu;
2576         int num_cpu;
2577         int stride;
2578
2579         if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2580                 virtnet_clean_affinity(vi);
2581                 return;
2582         }
2583
2584         num_cpu = num_online_cpus();
2585         stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
2586         stragglers = num_cpu >= vi->curr_queue_pairs ?
2587                         num_cpu % vi->curr_queue_pairs :
2588                         0;
2589         cpu = cpumask_first(cpu_online_mask);
2590
2591         for (i = 0; i < vi->curr_queue_pairs; i++) {
2592                 group_size = stride + (i < stragglers ? 1 : 0);
2593
2594                 for (j = 0; j < group_size; j++) {
2595                         cpumask_set_cpu(cpu, mask);
2596                         cpu = cpumask_next_wrap(cpu, cpu_online_mask,
2597                                                 nr_cpu_ids, false);
2598                 }
2599                 virtqueue_set_affinity(vi->rq[i].vq, mask);
2600                 virtqueue_set_affinity(vi->sq[i].vq, mask);
2601                 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
2602                 cpumask_clear(mask);
2603         }
2604
2605         vi->affinity_hint_set = true;
2606         free_cpumask_var(mask);
2607 }
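
/* Worked example with assumed CPU counts, illustration only: with 8 online
 * CPUs and 3 queue pairs, stride == max(8 / 3, 1) == 2 and
 * stragglers == 8 % 3 == 2, so the first two pairs get 3 CPUs each and the
 * last gets 2: {0-2}, {3-5}, {6-7}. Each mask is applied to the pair's rx
 * and tx virtqueues and mirrored into the XPS map.
 */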
2608
2609 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
2610 {
2611         struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2612                                                    node);
2613         virtnet_set_affinity(vi);
2614         return 0;
2615 }
2616
2617 static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
2618 {
2619         struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2620                                                    node_dead);
2621         virtnet_set_affinity(vi);
2622         return 0;
2623 }
2624
2625 static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
2626 {
2627         struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2628                                                    node);
2629
2630         virtnet_clean_affinity(vi);
2631         return 0;
2632 }
2633
2634 static enum cpuhp_state virtionet_online;
2635
2636 static int virtnet_cpu_notif_add(struct virtnet_info *vi)
2637 {
2638         int ret;
2639
2640         ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
2641         if (ret)
2642                 return ret;
2643         ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2644                                                &vi->node_dead);
2645         if (!ret)
2646                 return ret;
2647         cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2648         return ret;
2649 }
2650
2651 static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
2652 {
2653         cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2654         cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2655                                             &vi->node_dead);
2656 }
2657
2658 static void virtnet_get_ringparam(struct net_device *dev,
2659                                   struct ethtool_ringparam *ring,
2660                                   struct kernel_ethtool_ringparam *kernel_ring,
2661                                   struct netlink_ext_ack *extack)
2662 {
2663         struct virtnet_info *vi = netdev_priv(dev);
2664
2665         ring->rx_max_pending = vi->rq[0].vq->num_max;
2666         ring->tx_max_pending = vi->sq[0].vq->num_max;
2667         ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2668         ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2669 }
2670
2671 static int virtnet_set_ringparam(struct net_device *dev,
2672                                  struct ethtool_ringparam *ring,
2673                                  struct kernel_ethtool_ringparam *kernel_ring,
2674                                  struct netlink_ext_ack *extack)
2675 {
2676         struct virtnet_info *vi = netdev_priv(dev);
2677         u32 rx_pending, tx_pending;
2678         struct receive_queue *rq;
2679         struct send_queue *sq;
2680         int i, err;
2681
2682         if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2683                 return -EINVAL;
2684
2685         rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2686         tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2687
2688         if (ring->rx_pending == rx_pending &&
2689             ring->tx_pending == tx_pending)
2690                 return 0;
2691
2692         if (ring->rx_pending > vi->rq[0].vq->num_max)
2693                 return -EINVAL;
2694
2695         if (ring->tx_pending > vi->sq[0].vq->num_max)
2696                 return -EINVAL;
2697
2698         for (i = 0; i < vi->max_queue_pairs; i++) {
2699                 rq = vi->rq + i;
2700                 sq = vi->sq + i;
2701
2702                 if (ring->tx_pending != tx_pending) {
2703                         err = virtnet_tx_resize(vi, sq, ring->tx_pending);
2704                         if (err)
2705                                 return err;
2706                 }
2707
2708                 if (ring->rx_pending != rx_pending) {
2709                         err = virtnet_rx_resize(vi, rq, ring->rx_pending);
2710                         if (err)
2711                                 return err;
2712                 }
2713         }
2714
2715         return 0;
2716 }
2717
2718 static bool virtnet_commit_rss_command(struct virtnet_info *vi)
2719 {
2720         struct net_device *dev = vi->dev;
2721         struct scatterlist sgs[4];
2722         unsigned int sg_buf_size;
2723
2724         /* prepare sgs */
2725         sg_init_table(sgs, 4);
2726
2727         sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
2728         sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
2729
2730         sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
2731         sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
2732
2733         sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
2734                         - offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
2735         sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
2736
2737         sg_buf_size = vi->rss_key_size;
2738         sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
2739
2740         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
2741                                   vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
2742                                   : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
2743                 dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
2744                 return false;
2745         }
2746         return true;
2747 }
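
/* Illustrative note, not driver code: the four sgs above slice one
 * struct virtio_net_ctrl_rss into the layout the device expects -
 *
 *   sgs[0]: fixed fields up to indirection_table
 *   sgs[1]: indirection_table_mask + 1 16-bit table entries
 *   sgs[2]: the second fixed chunk, max_tx_vq up to key
 *   sgs[3]: rss_key_size bytes of hash key
 *
 * splitting around the two variable-length arrays so that only the bytes
 * actually in use go onto the control virtqueue.
 */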
2748
2749 static void virtnet_init_default_rss(struct virtnet_info *vi)
2750 {
2751         u32 indir_val = 0;
2752         int i = 0;
2753
2754         vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
2755         vi->rss_hash_types_saved = vi->rss_hash_types_supported;
2756         vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
2757                                                 ? vi->rss_indir_table_size - 1 : 0;
2758         vi->ctrl->rss.unclassified_queue = 0;
2759
2760         for (; i < vi->rss_indir_table_size; ++i) {
2761                 indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
2762                 vi->ctrl->rss.indirection_table[i] = indir_val;
2763         }
2764
2765         vi->ctrl->rss.max_tx_vq = vi->curr_queue_pairs;
2766         vi->ctrl->rss.hash_key_length = vi->rss_key_size;
2767
2768         netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
2769 }
2770
2771 static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
2772 {
2773         info->data = 0;
2774         switch (info->flow_type) {
2775         case TCP_V4_FLOW:
2776                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
2777                         info->data = RXH_IP_SRC | RXH_IP_DST |
2778                                                  RXH_L4_B_0_1 | RXH_L4_B_2_3;
2779                 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
2780                         info->data = RXH_IP_SRC | RXH_IP_DST;
2781                 }
2782                 break;
2783         case TCP_V6_FLOW:
2784                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
2785                         info->data = RXH_IP_SRC | RXH_IP_DST |
2786                                                  RXH_L4_B_0_1 | RXH_L4_B_2_3;
2787                 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
2788                         info->data = RXH_IP_SRC | RXH_IP_DST;
2789                 }
2790                 break;
2791         case UDP_V4_FLOW:
2792                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
2793                         info->data = RXH_IP_SRC | RXH_IP_DST |
2794                                                  RXH_L4_B_0_1 | RXH_L4_B_2_3;
2795                 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
2796                         info->data = RXH_IP_SRC | RXH_IP_DST;
2797                 }
2798                 break;
2799         case UDP_V6_FLOW:
2800                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
2801                         info->data = RXH_IP_SRC | RXH_IP_DST |
2802                                                  RXH_L4_B_0_1 | RXH_L4_B_2_3;
2803                 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
2804                         info->data = RXH_IP_SRC | RXH_IP_DST;
2805                 }
2806                 break;
2807         case IPV4_FLOW:
2808                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
2809                         info->data = RXH_IP_SRC | RXH_IP_DST;
2810
2811                 break;
2812         case IPV6_FLOW:
2813                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
2814                         info->data = RXH_IP_SRC | RXH_IP_DST;
2815
2816                 break;
2817         default:
2818                 info->data = 0;
2819                 break;
2820         }
2821 }
2822
2823 static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
2824 {
2825         u32 new_hashtypes = vi->rss_hash_types_saved;
2826         bool is_disable = info->data & RXH_DISCARD;
2827         bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
2828
2829         /* supports only 'sd', 'sdfn' and 'r' */
2830         if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
2831                 return false;
2832
2833         switch (info->flow_type) {
2834         case TCP_V4_FLOW:
2835                 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
2836                 if (!is_disable)
2837                         new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
2838                                 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
2839                 break;
2840         case UDP_V4_FLOW:
2841                 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
2842                 if (!is_disable)
2843                         new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
2844                                 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
2845                 break;
2846         case IPV4_FLOW:
2847                 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
2848                 if (!is_disable)
2849                         new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
2850                 break;
2851         case TCP_V6_FLOW:
2852                 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
2853                 if (!is_disable)
2854                         new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
2855                                 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
2856                 break;
2857         case UDP_V6_FLOW:
2858                 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
2859                 if (!is_disable)
2860                         new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
2861                                 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
2862                 break;
2863         case IPV6_FLOW:
2864                 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
2865                 if (!is_disable)
2866                         new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
2867                 break;
2868         default:
2869                 /* unsupported flow */
2870                 return false;
2871         }
2872
2873         /* reject if an unsupported hash type was requested */
2874         if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
2875                 return false;
2876
2877         if (new_hashtypes != vi->rss_hash_types_saved) {
2878                 vi->rss_hash_types_saved = new_hashtypes;
2879                 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
2880                 if (vi->dev->features & NETIF_F_RXHASH)
2881                         return virtnet_commit_rss_command(vi);
2882         }
2883
2884         return true;
2885 }
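/* Worked example (illustrative, not normative): the ethtool command
 *   ethtool -N eth0 rx-flow-hash tcp4 sdfn
 * arrives here as flow_type == TCP_V4_FLOW with info->data ==
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3, so is_l4 is
 * true and both VIRTIO_NET_RSS_HASH_TYPE_IPv4 and _TCPv4 get set, while
 * "ethtool -N eth0 rx-flow-hash tcp4 r" maps to RXH_DISCARD and clears
 * both bits.
 */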
2886
2887 static void virtnet_get_drvinfo(struct net_device *dev,
2888                                 struct ethtool_drvinfo *info)
2889 {
2890         struct virtnet_info *vi = netdev_priv(dev);
2891         struct virtio_device *vdev = vi->vdev;
2892
2893         strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
2894         strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
2895         strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
2897 }
2898
2899 /* TODO: Eliminate out-of-order (OOO) packets while switching queue counts */
2900 static int virtnet_set_channels(struct net_device *dev,
2901                                 struct ethtool_channels *channels)
2902 {
2903         struct virtnet_info *vi = netdev_priv(dev);
2904         u16 queue_pairs = channels->combined_count;
2905         int err;
2906
2907         /* We don't support separate rx/tx channels.
2908          * We don't allow setting 'other' channels.
2909          */
2910         if (channels->rx_count || channels->tx_count || channels->other_count)
2911                 return -EINVAL;
2912
2913         if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
2914                 return -EINVAL;
2915
2916         /* For now we don't support modifying channels while XDP is loaded.
2917          * Also, when XDP is loaded all RX queues have XDP programs, so we
2918          * only need to check a single RX queue.
2919          */
2920         if (vi->rq[0].xdp_prog)
2921                 return -EINVAL;
2922
2923         cpus_read_lock();
2924         err = _virtnet_set_queues(vi, queue_pairs);
2925         if (err) {
2926                 cpus_read_unlock();
2927                 goto err;
2928         }
2929         virtnet_set_affinity(vi);
2930         cpus_read_unlock();
2931
2932         netif_set_real_num_tx_queues(dev, queue_pairs);
2933         netif_set_real_num_rx_queues(dev, queue_pairs);
2934  err:
2935         return err;
2936 }
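/* Example usage (illustrative): "ethtool -L eth0 combined 4" requests
 * four queue pairs here, while "ethtool -L eth0 rx 4" is rejected with
 * -EINVAL because only combined channels are supported.
 */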
2937
2938 static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2939 {
2940         struct virtnet_info *vi = netdev_priv(dev);
2941         unsigned int i, j;
2942         u8 *p = data;
2943
2944         switch (stringset) {
2945         case ETH_SS_STATS:
2946                 for (i = 0; i < vi->curr_queue_pairs; i++) {
2947                         for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
2948                                 ethtool_sprintf(&p, "rx_queue_%u_%s", i,
2949                                                 virtnet_rq_stats_desc[j].desc);
2950                 }
2951
2952                 for (i = 0; i < vi->curr_queue_pairs; i++) {
2953                         for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
2954                                 ethtool_sprintf(&p, "tx_queue_%u_%s", i,
2955                                                 virtnet_sq_stats_desc[j].desc);
2956                 }
2957                 break;
2958         }
2959 }
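/* The resulting stat names are, e.g., "rx_queue_0_packets",
 * "rx_queue_0_bytes", ..., then "tx_queue_0_packets" and so on, one
 * block per queue; these are what "ethtool -S eth0" prints alongside
 * the values filled in by virtnet_get_ethtool_stats() below.
 */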
2960
2961 static int virtnet_get_sset_count(struct net_device *dev, int sset)
2962 {
2963         struct virtnet_info *vi = netdev_priv(dev);
2964
2965         switch (sset) {
2966         case ETH_SS_STATS:
2967                 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
2968                                                VIRTNET_SQ_STATS_LEN);
2969         default:
2970                 return -EOPNOTSUPP;
2971         }
2972 }
2973
2974 static void virtnet_get_ethtool_stats(struct net_device *dev,
2975                                       struct ethtool_stats *stats, u64 *data)
2976 {
2977         struct virtnet_info *vi = netdev_priv(dev);
2978         unsigned int idx = 0, start, i, j;
2979         const u8 *stats_base;
2980         size_t offset;
2981
2982         for (i = 0; i < vi->curr_queue_pairs; i++) {
2983                 struct receive_queue *rq = &vi->rq[i];
2984
2985                 stats_base = (u8 *)&rq->stats;
2986                 do {
2987                         start = u64_stats_fetch_begin(&rq->stats.syncp);
2988                         for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
2989                                 offset = virtnet_rq_stats_desc[j].offset;
2990                                 data[idx + j] = *(u64 *)(stats_base + offset);
2991                         }
2992                 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
2993                 idx += VIRTNET_RQ_STATS_LEN;
2994         }
2995
2996         for (i = 0; i < vi->curr_queue_pairs; i++) {
2997                 struct send_queue *sq = &vi->sq[i];
2998
2999                 stats_base = (u8 *)&sq->stats;
3000                 do {
3001                         start = u64_stats_fetch_begin(&sq->stats.syncp);
3002                         for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
3003                                 offset = virtnet_sq_stats_desc[j].offset;
3004                                 data[idx + j] = *(u64 *)(stats_base + offset);
3005                         }
3006                 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
3007                 idx += VIRTNET_SQ_STATS_LEN;
3008         }
3009 }
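/* Layout sketch (follows directly from the two loops above): data[]
 * holds all RX queue stats first, then all TX queue stats, so RX
 * counter j of queue q lands at q * VIRTNET_RQ_STATS_LEN + j, and TX
 * counter j of queue q lands at
 * curr_queue_pairs * VIRTNET_RQ_STATS_LEN + q * VIRTNET_SQ_STATS_LEN + j.
 */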
3010
3011 static void virtnet_get_channels(struct net_device *dev,
3012                                  struct ethtool_channels *channels)
3013 {
3014         struct virtnet_info *vi = netdev_priv(dev);
3015
3016         channels->combined_count = vi->curr_queue_pairs;
3017         channels->max_combined = vi->max_queue_pairs;
3018         channels->max_other = 0;
3019         channels->rx_count = 0;
3020         channels->tx_count = 0;
3021         channels->other_count = 0;
3022 }
3023
3024 static int virtnet_set_link_ksettings(struct net_device *dev,
3025                                       const struct ethtool_link_ksettings *cmd)
3026 {
3027         struct virtnet_info *vi = netdev_priv(dev);
3028
3029         return ethtool_virtdev_set_link_ksettings(dev, cmd,
3030                                                   &vi->speed, &vi->duplex);
3031 }
3032
3033 static int virtnet_get_link_ksettings(struct net_device *dev,
3034                                       struct ethtool_link_ksettings *cmd)
3035 {
3036         struct virtnet_info *vi = netdev_priv(dev);
3037
3038         cmd->base.speed = vi->speed;
3039         cmd->base.duplex = vi->duplex;
3040         cmd->base.port = PORT_OTHER;
3041
3042         return 0;
3043 }
3044
3045 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
3046                                        struct ethtool_coalesce *ec)
3047 {
3048         struct scatterlist sgs_tx, sgs_rx;
3049
3050         vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
3051         vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
3052         sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
3053
3054         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3055                                   VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
3056                                   &sgs_tx))
3057                 return -EINVAL;
3058
3059         /* Save parameters */
3060         vi->tx_usecs = ec->tx_coalesce_usecs;
3061         vi->tx_max_packets = ec->tx_max_coalesced_frames;
3062
3063         vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
3064         vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
3065         sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
3066
3067         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3068                                   VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
3069                                   &sgs_rx))
3070                 return -EINVAL;
3071
3072         /* Save parameters */
3073         vi->rx_usecs = ec->rx_coalesce_usecs;
3074         vi->rx_max_packets = ec->rx_max_coalesced_frames;
3075
3076         return 0;
3077 }
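/* Example (illustrative): with VIRTIO_NET_F_NOTF_COAL negotiated,
 * "ethtool -C eth0 rx-usecs 8 rx-frames 64" arrives here as
 * ec->rx_coalesce_usecs == 8 and ec->rx_max_coalesced_frames == 64,
 * which are sent to the device via VIRTIO_NET_CTRL_NOTF_COAL_RX_SET.
 */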
3078
3079 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
3080 {
3081         /* usecs coalescing is supported only if the VIRTIO_NET_F_NOTF_COAL
3082          * feature is negotiated; we only get here when it is not, so any
3083          * usecs request must be rejected.
3084          */
3084         if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
3085                 return -EOPNOTSUPP;
3086
3087         if (ec->tx_max_coalesced_frames > 1 ||
3088             ec->rx_max_coalesced_frames != 1)
3089                 return -EINVAL;
3090
3091         return 0;
3092 }
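/* Without VIRTIO_NET_F_NOTF_COAL the only tunable left is tx-frames,
 * used as a NAPI on/off switch in virtnet_set_coalesce() below: e.g.
 * "ethtool -C eth0 tx-frames 0" disables TX NAPI and "tx-frames 1"
 * re-enables it (illustrative; other values are rejected above).
 */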
3093
3094 static int virtnet_set_coalesce(struct net_device *dev,
3095                                 struct ethtool_coalesce *ec,
3096                                 struct kernel_ethtool_coalesce *kernel_coal,
3097                                 struct netlink_ext_ack *extack)
3098 {
3099         struct virtnet_info *vi = netdev_priv(dev);
3100         int ret, i, napi_weight;
3101         bool update_napi = false;
3102
3103         /* Can't change NAPI weight while the interface is up */
3104         napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3105         if (napi_weight ^ vi->sq[0].napi.weight) {
3106                 if (dev->flags & IFF_UP)
3107                         return -EBUSY;
3108                 else
3109                         update_napi = true;
3110         }
3111
3112         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
3113                 ret = virtnet_send_notf_coal_cmds(vi, ec);
3114         else
3115                 ret = virtnet_coal_params_supported(ec);
3116
3117         if (ret)
3118                 return ret;
3119
3120         if (update_napi) {
3121                 for (i = 0; i < vi->max_queue_pairs; i++)
3122                         vi->sq[i].napi.weight = napi_weight;
3123         }
3124
3125         return ret;
3126 }
3127
3128 static int virtnet_get_coalesce(struct net_device *dev,
3129                                 struct ethtool_coalesce *ec,
3130                                 struct kernel_ethtool_coalesce *kernel_coal,
3131                                 struct netlink_ext_ack *extack)
3132 {
3133         struct virtnet_info *vi = netdev_priv(dev);
3134
3135         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
3136                 ec->rx_coalesce_usecs = vi->rx_usecs;
3137                 ec->tx_coalesce_usecs = vi->tx_usecs;
3138                 ec->tx_max_coalesced_frames = vi->tx_max_packets;
3139                 ec->rx_max_coalesced_frames = vi->rx_max_packets;
3140         } else {
3141                 ec->rx_max_coalesced_frames = 1;
3142
3143                 if (vi->sq[0].napi.weight)
3144                         ec->tx_max_coalesced_frames = 1;
3145         }
3146
3147         return 0;
3148 }
3149
3150 static void virtnet_init_settings(struct net_device *dev)
3151 {
3152         struct virtnet_info *vi = netdev_priv(dev);
3153
3154         vi->speed = SPEED_UNKNOWN;
3155         vi->duplex = DUPLEX_UNKNOWN;
3156 }
3157
3158 static void virtnet_update_settings(struct virtnet_info *vi)
3159 {
3160         u32 speed;
3161         u8 duplex;
3162
3163         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
3164                 return;
3165
3166         virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
3167
3168         if (ethtool_validate_speed(speed))
3169                 vi->speed = speed;
3170
3171         virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
3172
3173         if (ethtool_validate_duplex(duplex))
3174                 vi->duplex = duplex;
3175 }
3176
3177 static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
3178 {
3179         return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
3180 }
3181
3182 static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
3183 {
3184         return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
3185 }
3186
3187 static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
3188 {
3189         struct virtnet_info *vi = netdev_priv(dev);
3190         int i;
3191
3192         if (indir) {
3193                 for (i = 0; i < vi->rss_indir_table_size; ++i)
3194                         indir[i] = vi->ctrl->rss.indirection_table[i];
3195         }
3196
3197         if (key)
3198                 memcpy(key, vi->ctrl->rss.key, vi->rss_key_size);
3199
3200         if (hfunc)
3201                 *hfunc = ETH_RSS_HASH_TOP;
3202
3203         return 0;
3204 }
3205
3206 static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
3207 {
3208         struct virtnet_info *vi = netdev_priv(dev);
3209         int i;
3210
3211         if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
3212                 return -EOPNOTSUPP;
3213
3214         if (indir) {
3215                 for (i = 0; i < vi->rss_indir_table_size; ++i)
3216                         vi->ctrl->rss.indirection_table[i] = indir[i];
3217         }
3218         if (key)
3219                 memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
3220
3221         virtnet_commit_rss_command(vi);
3222
3223         return 0;
3224 }
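/* Example usage (illustrative): "ethtool -X eth0 equal 4" spreads the
 * indirection table over queues 0-3, and "ethtool -X eth0 hfunc toeplitz"
 * names the only hash function accepted here (ETH_RSS_HASH_TOP);
 * any other hfunc returns -EOPNOTSUPP.
 */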
3225
3226 static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
3227 {
3228         struct virtnet_info *vi = netdev_priv(dev);
3229         int rc = 0;
3230
3231         switch (info->cmd) {
3232         case ETHTOOL_GRXRINGS:
3233                 info->data = vi->curr_queue_pairs;
3234                 break;
3235         case ETHTOOL_GRXFH:
3236                 virtnet_get_hashflow(vi, info);
3237                 break;
3238         default:
3239                 rc = -EOPNOTSUPP;
3240         }
3241
3242         return rc;
3243 }
3244
3245 static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
3246 {
3247         struct virtnet_info *vi = netdev_priv(dev);
3248         int rc = 0;
3249
3250         switch (info->cmd) {
3251         case ETHTOOL_SRXFH:
3252                 if (!virtnet_set_hashflow(vi, info))
3253                         rc = -EINVAL;
3254
3255                 break;
3256         default:
3257                 rc = -EOPNOTSUPP;
3258         }
3259
3260         return rc;
3261 }
3262
3263 static const struct ethtool_ops virtnet_ethtool_ops = {
3264         .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
3265                 ETHTOOL_COALESCE_USECS,
3266         .get_drvinfo = virtnet_get_drvinfo,
3267         .get_link = ethtool_op_get_link,
3268         .get_ringparam = virtnet_get_ringparam,
3269         .set_ringparam = virtnet_set_ringparam,
3270         .get_strings = virtnet_get_strings,
3271         .get_sset_count = virtnet_get_sset_count,
3272         .get_ethtool_stats = virtnet_get_ethtool_stats,
3273         .set_channels = virtnet_set_channels,
3274         .get_channels = virtnet_get_channels,
3275         .get_ts_info = ethtool_op_get_ts_info,
3276         .get_link_ksettings = virtnet_get_link_ksettings,
3277         .set_link_ksettings = virtnet_set_link_ksettings,
3278         .set_coalesce = virtnet_set_coalesce,
3279         .get_coalesce = virtnet_get_coalesce,
3280         .get_rxfh_key_size = virtnet_get_rxfh_key_size,
3281         .get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
3282         .get_rxfh = virtnet_get_rxfh,
3283         .set_rxfh = virtnet_set_rxfh,
3284         .get_rxnfc = virtnet_get_rxnfc,
3285         .set_rxnfc = virtnet_set_rxnfc,
3286 };
3287
3288 static void virtnet_freeze_down(struct virtio_device *vdev)
3289 {
3290         struct virtnet_info *vi = vdev->priv;
3291
3292         /* Make sure no work handler is accessing the device */
3293         flush_work(&vi->config_work);
3294
3295         netif_tx_lock_bh(vi->dev);
3296         netif_device_detach(vi->dev);
3297         netif_tx_unlock_bh(vi->dev);
3298         if (netif_running(vi->dev))
3299                 virtnet_close(vi->dev);
3300 }
3301
3302 static int init_vqs(struct virtnet_info *vi);
3303
3304 static int virtnet_restore_up(struct virtio_device *vdev)
3305 {
3306         struct virtnet_info *vi = vdev->priv;
3307         int err;
3308
3309         err = init_vqs(vi);
3310         if (err)
3311                 return err;
3312
3313         virtio_device_ready(vdev);
3314
3315         enable_delayed_refill(vi);
3316
3317         if (netif_running(vi->dev)) {
3318                 err = virtnet_open(vi->dev);
3319                 if (err)
3320                         return err;
3321         }
3322
3323         netif_tx_lock_bh(vi->dev);
3324         netif_device_attach(vi->dev);
3325         netif_tx_unlock_bh(vi->dev);
3326         return err;
3327 }
3328
3329 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
3330 {
3331         struct scatterlist sg;
3332         vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
3333
3334         sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
3335
3336         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
3337                                   VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
3338                 dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
3339                 return -EINVAL;
3340         }
3341
3342         return 0;
3343 }
3344
3345 static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
3346 {
3347         u64 offloads = 0;
3348
3349         if (!vi->guest_offloads)
3350                 return 0;
3351
3352         return virtnet_set_guest_offloads(vi, offloads);
3353 }
3354
3355 static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
3356 {
3357         u64 offloads = vi->guest_offloads;
3358
3359         if (!vi->guest_offloads)
3360                 return 0;
3361
3362         return virtnet_set_guest_offloads(vi, offloads);
3363 }
3364
3365 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
3366                            struct netlink_ext_ack *extack)
3367 {
3368         unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
3369                                            sizeof(struct skb_shared_info));
3370         unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
3371         struct virtnet_info *vi = netdev_priv(dev);
3372         struct bpf_prog *old_prog;
3373         u16 xdp_qp = 0, curr_qp;
3374         int i, err;
3375
3376         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
3377             && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3378                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
3379                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
3380                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
3381                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) ||
3382                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) ||
3383                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) {
3384                 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
3385                 return -EOPNOTSUPP;
3386         }
3387
3388         if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
3389                 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
3390                 return -EINVAL;
3391         }
3392
3393         if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
3394                 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
3395                 netdev_warn(dev, "single-buffer XDP requires MTU no larger than %u\n", max_sz);
3396                 return -EINVAL;
3397         }
3398
3399         curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
3400         if (prog)
3401                 xdp_qp = nr_cpu_ids;
3402
3403         /* XDP requires extra queues for XDP_TX */
3404         if (curr_qp + xdp_qp > vi->max_queue_pairs) {
3405                 netdev_warn_once(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
3406                                  curr_qp + xdp_qp, vi->max_queue_pairs);
3407                 xdp_qp = 0;
3408         }
3409
3410         old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
3411         if (!prog && !old_prog)
3412                 return 0;
3413
3414         if (prog)
3415                 bpf_prog_add(prog, vi->max_queue_pairs - 1);
3416
3417         /* Make sure NAPI is not using any XDP TX queues for RX. */
3418         if (netif_running(dev)) {
3419                 for (i = 0; i < vi->max_queue_pairs; i++) {
3420                         napi_disable(&vi->rq[i].napi);
3421                         virtnet_napi_tx_disable(&vi->sq[i].napi);
3422                 }
3423         }
3424
3425         if (!prog) {
3426                 for (i = 0; i < vi->max_queue_pairs; i++) {
3427                         rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
3428                         if (i == 0)
3429                                 virtnet_restore_guest_offloads(vi);
3430                 }
3431                 synchronize_net();
3432         }
3433
3434         err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
3435         if (err)
3436                 goto err;
3437         netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
3438         vi->xdp_queue_pairs = xdp_qp;
3439
3440         if (prog) {
3441                 vi->xdp_enabled = true;
3442                 for (i = 0; i < vi->max_queue_pairs; i++) {
3443                         rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
3444                         if (i == 0 && !old_prog)
3445                                 virtnet_clear_guest_offloads(vi);
3446                 }
3447                 if (!old_prog)
3448                         xdp_features_set_redirect_target(dev, true);
3449         } else {
3450                 xdp_features_clear_redirect_target(dev);
3451                 vi->xdp_enabled = false;
3452         }
3453
3454         for (i = 0; i < vi->max_queue_pairs; i++) {
3455                 if (old_prog)
3456                         bpf_prog_put(old_prog);
3457                 if (netif_running(dev)) {
3458                         virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
3459                         virtnet_napi_tx_enable(vi, vi->sq[i].vq,
3460                                                &vi->sq[i].napi);
3461                 }
3462         }
3463
3464         return 0;
3465
3466 err:
3467         if (!prog) {
3468                 virtnet_clear_guest_offloads(vi);
3469                 for (i = 0; i < vi->max_queue_pairs; i++)
3470                         rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
3471         }
3472
3473         if (netif_running(dev)) {
3474                 for (i = 0; i < vi->max_queue_pairs; i++) {
3475                         virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
3476                         virtnet_napi_tx_enable(vi, vi->sq[i].vq,
3477                                                &vi->sq[i].napi);
3478                 }
3479         }
3480         if (prog)
3481                 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
3482         return err;
3483 }
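/* Rough numbers for the single-buffer MTU check above (an assumption
 * for illustration, not exact for every config): with 4 KiB pages, the
 * 256-byte VIRTIO_XDP_HEADROOM and a struct skb_shared_info of about
 * 320 bytes, room is SKB_DATA_ALIGN(~576) and max_sz works out to
 * roughly 3500 bytes, so a 3000-byte MTU can load a non-frags program
 * but a 9000-byte MTU requires xdp_has_frags.
 */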
3484
3485 static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3486 {
3487         switch (xdp->command) {
3488         case XDP_SETUP_PROG:
3489                 return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
3490         default:
3491                 return -EINVAL;
3492         }
3493 }
3494
3495 static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
3496                                       size_t len)
3497 {
3498         struct virtnet_info *vi = netdev_priv(dev);
3499         int ret;
3500
3501         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
3502                 return -EOPNOTSUPP;
3503
3504         ret = snprintf(buf, len, "sby");
3505         if (ret >= len)
3506                 return -EOPNOTSUPP;
3507
3508         return 0;
3509 }
3510
3511 static int virtnet_set_features(struct net_device *dev,
3512                                 netdev_features_t features)
3513 {
3514         struct virtnet_info *vi = netdev_priv(dev);
3515         u64 offloads;
3516         int err;
3517
3518         if ((dev->features ^ features) & NETIF_F_GRO_HW) {
3519                 if (vi->xdp_enabled)
3520                         return -EBUSY;
3521
3522                 if (features & NETIF_F_GRO_HW)
3523                         offloads = vi->guest_offloads_capable;
3524                 else
3525                         offloads = vi->guest_offloads_capable &
3526                                    ~GUEST_OFFLOAD_GRO_HW_MASK;
3527
3528                 err = virtnet_set_guest_offloads(vi, offloads);
3529                 if (err)
3530                         return err;
3531                 vi->guest_offloads = offloads;
3532         }
3533
3534         if ((dev->features ^ features) & NETIF_F_RXHASH) {
3535                 if (features & NETIF_F_RXHASH)
3536                         vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
3537                 else
3538                         vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
3539
3540                 if (!virtnet_commit_rss_command(vi))
3541                         return -EINVAL;
3542         }
3543
3544         return 0;
3545 }
3546
3547 static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
3548 {
3549         struct virtnet_info *priv = netdev_priv(dev);
3550         struct send_queue *sq = &priv->sq[txqueue];
3551         struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
3552
3553         u64_stats_update_begin(&sq->stats.syncp);
3554         sq->stats.tx_timeouts++;
3555         u64_stats_update_end(&sq->stats.syncp);
3556
3557         netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
3558                    txqueue, sq->name, sq->vq->index, sq->vq->name,
3559                    jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
3560 }
3561
3562 static const struct net_device_ops virtnet_netdev = {
3563         .ndo_open            = virtnet_open,
3564         .ndo_stop            = virtnet_close,
3565         .ndo_start_xmit      = start_xmit,
3566         .ndo_validate_addr   = eth_validate_addr,
3567         .ndo_set_mac_address = virtnet_set_mac_address,
3568         .ndo_set_rx_mode     = virtnet_set_rx_mode,
3569         .ndo_get_stats64     = virtnet_stats,
3570         .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
3571         .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
3572         .ndo_bpf                = virtnet_xdp,
3573         .ndo_xdp_xmit           = virtnet_xdp_xmit,
3574         .ndo_features_check     = passthru_features_check,
3575         .ndo_get_phys_port_name = virtnet_get_phys_port_name,
3576         .ndo_set_features       = virtnet_set_features,
3577         .ndo_tx_timeout         = virtnet_tx_timeout,
3578 };
3579
3580 static void virtnet_config_changed_work(struct work_struct *work)
3581 {
3582         struct virtnet_info *vi =
3583                 container_of(work, struct virtnet_info, config_work);
3584         u16 v;
3585
3586         if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
3587                                  struct virtio_net_config, status, &v) < 0)
3588                 return;
3589
3590         if (v & VIRTIO_NET_S_ANNOUNCE) {
3591                 netdev_notify_peers(vi->dev);
3592                 virtnet_ack_link_announce(vi);
3593         }
3594
3595         /* Ignore unknown (future) status bits */
3596         v &= VIRTIO_NET_S_LINK_UP;
3597
3598         if (vi->status == v)
3599                 return;
3600
3601         vi->status = v;
3602
3603         if (vi->status & VIRTIO_NET_S_LINK_UP) {
3604                 virtnet_update_settings(vi);
3605                 netif_carrier_on(vi->dev);
3606                 netif_tx_wake_all_queues(vi->dev);
3607         } else {
3608                 netif_carrier_off(vi->dev);
3609                 netif_tx_stop_all_queues(vi->dev);
3610         }
3611 }
3612
3613 static void virtnet_config_changed(struct virtio_device *vdev)
3614 {
3615         struct virtnet_info *vi = vdev->priv;
3616
3617         schedule_work(&vi->config_work);
3618 }
3619
3620 static void virtnet_free_queues(struct virtnet_info *vi)
3621 {
3622         int i;
3623
3624         for (i = 0; i < vi->max_queue_pairs; i++) {
3625                 __netif_napi_del(&vi->rq[i].napi);
3626                 __netif_napi_del(&vi->sq[i].napi);
3627         }
3628
3629         /* We called __netif_napi_del(), so we must respect an RCU
3630          * grace period before freeing vi->rq.
3631          */
3632         synchronize_net();
3633
3634         kfree(vi->rq);
3635         kfree(vi->sq);
3636         kfree(vi->ctrl);
3637 }
3638
3639 static void _free_receive_bufs(struct virtnet_info *vi)
3640 {
3641         struct bpf_prog *old_prog;
3642         int i;
3643
3644         for (i = 0; i < vi->max_queue_pairs; i++) {
3645                 while (vi->rq[i].pages)
3646                         __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
3647
3648                 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
3649                 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
3650                 if (old_prog)
3651                         bpf_prog_put(old_prog);
3652         }
3653 }
3654
3655 static void free_receive_bufs(struct virtnet_info *vi)
3656 {
3657         rtnl_lock();
3658         _free_receive_bufs(vi);
3659         rtnl_unlock();
3660 }
3661
3662 static void free_receive_page_frags(struct virtnet_info *vi)
3663 {
3664         int i;
3665         for (i = 0; i < vi->max_queue_pairs; i++)
3666                 if (vi->rq[i].alloc_frag.page)
3667                         put_page(vi->rq[i].alloc_frag.page);
3668 }
3669
3670 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
3671 {
3672         if (!is_xdp_frame(buf))
3673                 dev_kfree_skb(buf);
3674         else
3675                 xdp_return_frame(ptr_to_xdp(buf));
3676 }
3677
3678 static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
3679 {
3680         struct virtnet_info *vi = vq->vdev->priv;
3681         int i = vq2rxq(vq);
3682
3683         if (vi->mergeable_rx_bufs)
3684                 put_page(virt_to_head_page(buf));
3685         else if (vi->big_packets)
3686                 give_pages(&vi->rq[i], buf);
3687         else
3688                 put_page(virt_to_head_page(buf));
3689 }
3690
3691 static void free_unused_bufs(struct virtnet_info *vi)
3692 {
3693         void *buf;
3694         int i;
3695
3696         for (i = 0; i < vi->max_queue_pairs; i++) {
3697                 struct virtqueue *vq = vi->sq[i].vq;
3698                 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
3699                         virtnet_sq_free_unused_buf(vq, buf);
3700                 cond_resched();
3701         }
3702
3703         for (i = 0; i < vi->max_queue_pairs; i++) {
3704                 struct virtqueue *vq = vi->rq[i].vq;
3705                 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
3706                         virtnet_rq_free_unused_buf(vq, buf);
3707                 cond_resched();
3708         }
3709 }
3710
3711 static void virtnet_del_vqs(struct virtnet_info *vi)
3712 {
3713         struct virtio_device *vdev = vi->vdev;
3714
3715         virtnet_clean_affinity(vi);
3716
3717         vdev->config->del_vqs(vdev);
3718
3719         virtnet_free_queues(vi);
3720 }
3721
3722 /* How large should a single buffer be so a queue full of these can fit at
3723  * least one full packet?
3724  * Logic below assumes the mergeable buffer header is used.
3725  */
3726 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
3727 {
3728         const unsigned int hdr_len = vi->hdr_len;
3729         unsigned int rq_size = virtqueue_get_vring_size(vq);
3730         unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
3731         unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
3732         unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
3733
3734         return max(max(min_buf_len, hdr_len) - hdr_len,
3735                    (unsigned int)GOOD_PACKET_LEN);
3736 }
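/* Worked example (assuming a 12-byte mergeable header and big_packets,
 * i.e. packet_len == IP_MAX_MTU == 65535): buf_len == 65565, so a
 * 16-entry ring gives DIV_ROUND_UP(65565, 16) == 4098 and a min_buf_len
 * of 4086, while a 256-entry ring falls back to the GOOD_PACKET_LEN
 * floor of 1518.
 */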
3737
3738 static int virtnet_find_vqs(struct virtnet_info *vi)
3739 {
3740         vq_callback_t **callbacks;
3741         struct virtqueue **vqs;
3742         int ret = -ENOMEM;
3743         int i, total_vqs;
3744         const char **names;
3745         bool *ctx;
3746
3747         /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
3748          * up to N-1 further RX/TX queue pairs used in multiqueue mode,
3749          * followed by an optional control vq.
3750          */
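        /* Illustration (assumes the rxq2vq()/txq2vq() mapping used below):
         * with max_queue_pairs == 2 plus a control vq, total_vqs == 5 and
         * the layout is vq0 "input.0", vq1 "output.0", vq2 "input.1",
         * vq3 "output.1", vq4 "control".
         */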
3751         total_vqs = vi->max_queue_pairs * 2 +
3752                     virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
3753
3754         /* Allocate space for find_vqs parameters */
3755         vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
3756         if (!vqs)
3757                 goto err_vq;
3758         callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
3759         if (!callbacks)
3760                 goto err_callback;
3761         names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
3762         if (!names)
3763                 goto err_names;
3764         if (!vi->big_packets || vi->mergeable_rx_bufs) {
3765                 ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
3766                 if (!ctx)
3767                         goto err_ctx;
3768         } else {
3769                 ctx = NULL;
3770         }
3771
3772         /* Parameters for control virtqueue, if any */
3773         if (vi->has_cvq) {
3774                 callbacks[total_vqs - 1] = NULL;
3775                 names[total_vqs - 1] = "control";
3776         }
3777
3778         /* Allocate/initialize parameters for send/receive virtqueues */
3779         for (i = 0; i < vi->max_queue_pairs; i++) {
3780                 callbacks[rxq2vq(i)] = skb_recv_done;
3781                 callbacks[txq2vq(i)] = skb_xmit_done;
3782                 sprintf(vi->rq[i].name, "input.%d", i);
3783                 sprintf(vi->sq[i].name, "output.%d", i);
3784                 names[rxq2vq(i)] = vi->rq[i].name;
3785                 names[txq2vq(i)] = vi->sq[i].name;
3786                 if (ctx)
3787                         ctx[rxq2vq(i)] = true;
3788         }
3789
3790         ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
3791                                   names, ctx, NULL);
3792         if (ret)
3793                 goto err_find;
3794
3795         if (vi->has_cvq) {
3796                 vi->cvq = vqs[total_vqs - 1];
3797                 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
3798                         vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3799         }
3800
3801         for (i = 0; i < vi->max_queue_pairs; i++) {
3802                 vi->rq[i].vq = vqs[rxq2vq(i)];
3803                 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
3804                 vi->sq[i].vq = vqs[txq2vq(i)];
3805         }
3806
3807         /* Success: ret == 0; fall through to free the scratch arrays. */
3808
3810 err_find:
3811         kfree(ctx);
3812 err_ctx:
3813         kfree(names);
3814 err_names:
3815         kfree(callbacks);
3816 err_callback:
3817         kfree(vqs);
3818 err_vq:
3819         return ret;
3820 }
3821
3822 static int virtnet_alloc_queues(struct virtnet_info *vi)
3823 {
3824         int i;
3825
3826         if (vi->has_cvq) {
3827                 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
3828                 if (!vi->ctrl)
3829                         goto err_ctrl;
3830         } else {
3831                 vi->ctrl = NULL;
3832         }
3833         vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
3834         if (!vi->sq)
3835                 goto err_sq;
3836         vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
3837         if (!vi->rq)
3838                 goto err_rq;
3839
3840         INIT_DELAYED_WORK(&vi->refill, refill_work);
3841         for (i = 0; i < vi->max_queue_pairs; i++) {
3842                 vi->rq[i].pages = NULL;
3843                 netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
3844                                       napi_weight);
3845                 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
3846                                          virtnet_poll_tx,
3847                                          napi_tx ? napi_weight : 0);
3848
3849                 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
3850                 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
3851                 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
3852
3853                 u64_stats_init(&vi->rq[i].stats.syncp);
3854                 u64_stats_init(&vi->sq[i].stats.syncp);
3855         }
3856
3857         return 0;
3858
3859 err_rq:
3860         kfree(vi->sq);
3861 err_sq:
3862         kfree(vi->ctrl);
3863 err_ctrl:
3864         return -ENOMEM;
3865 }
3866
3867 static int init_vqs(struct virtnet_info *vi)
3868 {
3869         int ret;
3870
3871         /* Allocate send & receive queues */
3872         ret = virtnet_alloc_queues(vi);
3873         if (ret)
3874                 goto err;
3875
3876         ret = virtnet_find_vqs(vi);
3877         if (ret)
3878                 goto err_free;
3879
3880         cpus_read_lock();
3881         virtnet_set_affinity(vi);
3882         cpus_read_unlock();
3883
3884         return 0;
3885
3886 err_free:
3887         virtnet_free_queues(vi);
3888 err:
3889         return ret;
3890 }
3891
3892 #ifdef CONFIG_SYSFS
3893 static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
3894                 char *buf)
3895 {
3896         struct virtnet_info *vi = netdev_priv(queue->dev);
3897         unsigned int queue_index = get_netdev_rx_queue_index(queue);
3898         unsigned int headroom = virtnet_get_headroom(vi);
3899         unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
3900         struct ewma_pkt_len *avg;
3901
3902         BUG_ON(queue_index >= vi->max_queue_pairs);
3903         avg = &vi->rq[queue_index].mrg_avg_pkt_len;
3904         return sprintf(buf, "%u\n",
3905                        get_mergeable_buf_len(&vi->rq[queue_index], avg,
3906                                        SKB_DATA_ALIGN(headroom + tailroom)));
3907 }
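/* This surfaces in sysfs (illustrative path, assuming the attribute
 * group below is registered for the device):
 *   cat /sys/class/net/eth0/queues/rx-0/virtio_net/mergeable_rx_buffer_size
 * printing the EWMA-derived buffer size currently used for RX refills.
 */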
3908
3909 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
3910         __ATTR_RO(mergeable_rx_buffer_size);
3911
3912 static struct attribute *virtio_net_mrg_rx_attrs[] = {
3913         &mergeable_rx_buffer_size_attribute.attr,
3914         NULL
3915 };
3916
3917 static const struct attribute_group virtio_net_mrg_rx_group = {
3918         .name = "virtio_net",
3919         .attrs = virtio_net_mrg_rx_attrs
3920 };
3921 #endif
3922
3923 static bool virtnet_fail_on_feature(struct virtio_device *vdev,
3924                                     unsigned int fbit,
3925                                     const char *fname, const char *dname)
3926 {
3927         if (!virtio_has_feature(vdev, fbit))
3928                 return false;
3929
3930         dev_err(&vdev->dev, "device advertises feature %s but not %s\n",
3931                 fname, dname);
3932
3933         return true;
3934 }
3935
3936 #define VIRTNET_FAIL_ON(vdev, fbit, dbit)                       \
3937         virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
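/* For example, VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ")
 * expands to virtnet_fail_on_feature(vdev, VIRTIO_NET_F_MQ,
 * "VIRTIO_NET_F_MQ", "VIRTIO_NET_F_CTRL_VQ") via the # stringification.
 */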
3938
3939 static bool virtnet_validate_features(struct virtio_device *vdev)
3940 {
3941         if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
3942             (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
3943                              "VIRTIO_NET_F_CTRL_VQ") ||
3944              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
3945                              "VIRTIO_NET_F_CTRL_VQ") ||
3946              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
3947                              "VIRTIO_NET_F_CTRL_VQ") ||
3948              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
3949              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
3950                              "VIRTIO_NET_F_CTRL_VQ") ||
3951              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
3952                              "VIRTIO_NET_F_CTRL_VQ") ||
3953              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
3954                              "VIRTIO_NET_F_CTRL_VQ") ||
3955              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
3956                              "VIRTIO_NET_F_CTRL_VQ"))) {
3957                 return false;
3958         }
3959
3960         return true;
3961 }
3962
3963 #define MIN_MTU ETH_MIN_MTU
3964 #define MAX_MTU ETH_MAX_MTU
3965
3966 static int virtnet_validate(struct virtio_device *vdev)
3967 {
3968         if (!vdev->config->get) {
3969                 dev_err(&vdev->dev, "%s failure: config access disabled\n",
3970                         __func__);
3971                 return -EINVAL;
3972         }
3973
3974         if (!virtnet_validate_features(vdev))
3975                 return -EINVAL;
3976
3977         if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
3978                 int mtu = virtio_cread16(vdev,
3979                                          offsetof(struct virtio_net_config,
3980                                                   mtu));
3981                 if (mtu < MIN_MTU)
3982                         __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
3983         }
3984
3985         if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
3986             !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
3987                 dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby\n");
3988                 __virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
3989         }
3990
3991         return 0;
3992 }
3993
3994 static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
3995 {
3996         return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3997                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
3998                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
3999                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
4000                 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
4001                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
4002 }
4003
4004 static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
4005 {
4006         bool guest_gso = virtnet_check_guest_gso(vi);
4007
4008         /* If the device can receive ANY guest GSO packets, regardless of
4009          * mtu, allocate buffers of maximum size; otherwise limit them to
4010          * an mtu's worth.
4011          */
4012         if (mtu > ETH_DATA_LEN || guest_gso) {
4013                 vi->big_packets = true;
4014                 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
4015         }
4016 }
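/* Worked example (assuming 4 KiB pages, illustrative): an mtu of 9000
 * with no guest GSO sets big_packets and big_packets_num_skbfrags ==
 * DIV_ROUND_UP(9000, 4096) == 3, whereas any guest GSO feature bumps it
 * to MAX_SKB_FRAGS regardless of mtu.
 */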
4017
4018 static int virtnet_probe(struct virtio_device *vdev)
4019 {
4020         int i, err = -ENOMEM;
4021         struct net_device *dev;
4022         struct virtnet_info *vi;
4023         u16 max_queue_pairs;
4024         int mtu = 0;
4025
4026         /* Find out whether the host supports a multiqueue/RSS virtio_net device */
4027         max_queue_pairs = 1;
4028         if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
4029                 max_queue_pairs =
4030                      virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
4031
4032         /* We need at least 2 queues */
4033         if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
4034             max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
4035             !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4036                 max_queue_pairs = 1;
4037
4038         /* Allocate ourselves a network device with room for our info */
4039         dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
4040         if (!dev)
4041                 return -ENOMEM;
4042
4043         /* Set up network device as normal. */
4044         dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
4045                            IFF_TX_SKB_NO_LINEAR;
4046         dev->netdev_ops = &virtnet_netdev;
4047         dev->features = NETIF_F_HIGHDMA;
4048
4049         dev->ethtool_ops = &virtnet_ethtool_ops;
4050         SET_NETDEV_DEV(dev, &vdev->dev);
4051
4052         /* Do we support "hardware" checksums? */
4053         if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
4054                 /* This opens up the world of extra features. */
4055                 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4056                 if (csum)
4057                         dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4058
4059                 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
4060                         dev->hw_features |= NETIF_F_TSO
4061                                 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
4062                 }
4063                 /* Individual feature bits: what can host handle? */
4064                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
4065                         dev->hw_features |= NETIF_F_TSO;
4066                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
4067                         dev->hw_features |= NETIF_F_TSO6;
4068                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
4069                         dev->hw_features |= NETIF_F_TSO_ECN;
4070                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
4071                         dev->hw_features |= NETIF_F_GSO_UDP_L4;
4072
4073                 dev->features |= NETIF_F_GSO_ROBUST;
4074
4075                 if (gso)
4076                         dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
4077                 /* (!csum && gso) case will be fixed by register_netdev() */
4078         }
4079         if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
4080                 dev->features |= NETIF_F_RXCSUM;
4081         if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
4082             virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
4083                 dev->features |= NETIF_F_GRO_HW;
4084         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
4085                 dev->hw_features |= NETIF_F_GRO_HW;
4086
4087         dev->vlan_features = dev->features;
4088         dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
4089
4090         /* MTU range: 68 - 65535 */
4091         dev->min_mtu = MIN_MTU;
4092         dev->max_mtu = MAX_MTU;
4093
4094         /* Configuration may specify what MAC to use.  Otherwise random. */
4095         if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
4096                 u8 addr[ETH_ALEN];
4097
4098                 virtio_cread_bytes(vdev,
4099                                    offsetof(struct virtio_net_config, mac),
4100                                    addr, ETH_ALEN);
4101                 eth_hw_addr_set(dev, addr);
4102         } else {
4103                 eth_hw_addr_random(dev);
4104                 dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
4105                          dev->dev_addr);
4106         }
4107
4108         /* Set up our device-specific information */
4109         vi = netdev_priv(dev);
4110         vi->dev = dev;
4111         vi->vdev = vdev;
4112         vdev->priv = vi;
4113
4114         INIT_WORK(&vi->config_work, virtnet_config_changed_work);
4115         spin_lock_init(&vi->refill_lock);
4116
4117         if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
4118                 vi->mergeable_rx_bufs = true;
4119                 dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
4120         }
4121
4122         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
4123                 vi->rx_usecs = 0;
4124                 vi->tx_usecs = 0;
4125                 vi->tx_max_packets = 0;
4126                 vi->rx_max_packets = 0;
4127         }
4128
4129         if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
4130                 vi->has_rss_hash_report = true;
4131
4132         if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
4133                 vi->has_rss = true;
4134
4135         if (vi->has_rss || vi->has_rss_hash_report) {
4136                 vi->rss_indir_table_size =
4137                         virtio_cread16(vdev, offsetof(struct virtio_net_config,
4138                                 rss_max_indirection_table_length));
4139                 vi->rss_key_size =
4140                         virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
4141
4142                 vi->rss_hash_types_supported =
4143                     virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
4144                 vi->rss_hash_types_supported &=
4145                                 ~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
4146                                   VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
4147                                   VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
4148
4149                 dev->hw_features |= NETIF_F_RXHASH;
4150         }
4151
4152         if (vi->has_rss_hash_report)
4153                 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
4154         else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
4155                  virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4156                 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
4157         else
4158                 vi->hdr_len = sizeof(struct virtio_net_hdr);
4159
4160         if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
4161             virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4162                 vi->any_header_sg = true;
4163
4164         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4165                 vi->has_cvq = true;
4166
4167         if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
4168                 mtu = virtio_cread16(vdev,
4169                                      offsetof(struct virtio_net_config,
4170                                               mtu));
4171                 if (mtu < dev->min_mtu) {
4172                         /* Should never trigger: MTU was previously validated
4173                          * in virtnet_validate.
4174                          */
4175                         dev_err(&vdev->dev,
4176                                 "device MTU appears to have changed: it is now %d < %d",
4177                                 mtu, dev->min_mtu);
4178                         err = -EINVAL;
4179                         goto free;
4180                 }
4181
4182                 dev->mtu = mtu;
4183                 dev->max_mtu = mtu;
4184         }
4185
4186         virtnet_set_big_packets(vi, mtu);
4187
4188         if (vi->any_header_sg)
4189                 dev->needed_headroom = vi->hdr_len;
4190
4191         /* Enable multiqueue by default */
4192         if (num_online_cpus() >= max_queue_pairs)
4193                 vi->curr_queue_pairs = max_queue_pairs;
4194         else
4195                 vi->curr_queue_pairs = num_online_cpus();
4196         vi->max_queue_pairs = max_queue_pairs;
4197
4198         /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
4199         err = init_vqs(vi);
4200         if (err)
4201                 goto free;
4202
4203 #ifdef CONFIG_SYSFS
4204         if (vi->mergeable_rx_bufs)
4205                 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
4206 #endif
4207         netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
4208         netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
4209
4210         virtnet_init_settings(dev);
4211
4212         if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
4213                 vi->failover = net_failover_create(vi->dev);
4214                 if (IS_ERR(vi->failover)) {
4215                         err = PTR_ERR(vi->failover);
4216                         goto free_vqs;
4217                 }
4218         }
4219
4220         if (vi->has_rss || vi->has_rss_hash_report)
4221                 virtnet_init_default_rss(vi);
4222
4223         /* serialize netdev register + virtio_device_ready() with ndo_open() */
4224         rtnl_lock();
4225
4226         err = register_netdevice(dev);
4227         if (err) {
4228                 pr_debug("virtio_net: registering device failed\n");
4229                 rtnl_unlock();
4230                 goto free_failover;
4231         }
4232
4233         virtio_device_ready(vdev);
4234
4235         /* A random MAC address has been assigned; notify the device.
4236          * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is absent
4237          * because many devices work fine without being told the MAC
4238          * explicitly.
4239          */
4239         if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
4240             virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
4241                 struct scatterlist sg;
4242
4243                 sg_init_one(&sg, dev->dev_addr, dev->addr_len);
4244                 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
4245                                           VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
4246                         pr_debug("virtio_net: setting MAC address failed\n");
4247                         rtnl_unlock();
4248                         err = -EINVAL;
4249                         goto free_unregister_netdev;
4250                 }
4251         }
4252
4253         rtnl_unlock();
4254
4255         err = virtnet_cpu_notif_add(vi);
4256         if (err) {
4257                 pr_debug("virtio_net: registering cpu notifier failed\n");
4258                 goto free_unregister_netdev;
4259         }
4260
4261         virtnet_set_queues(vi, vi->curr_queue_pairs);
4262
4263         /* Assume link up if device can't report link status,
4264          * otherwise get link status from config. */
4265         netif_carrier_off(dev);
4266         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
4267                 schedule_work(&vi->config_work);
4268         } else {
4269                 vi->status = VIRTIO_NET_S_LINK_UP;
4270                 virtnet_update_settings(vi);
4271                 netif_carrier_on(dev);
4272         }
4273
4274         for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
4275                 if (virtio_has_feature(vi->vdev, guest_offloads[i]))
4276                         set_bit(guest_offloads[i], &vi->guest_offloads);
4277         vi->guest_offloads_capable = vi->guest_offloads;
4278
4279         pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
4280                  dev->name, max_queue_pairs);
4281
4282         return 0;
4283
4284 free_unregister_netdev:
4285         unregister_netdev(dev);
4286 free_failover:
4287         net_failover_destroy(vi->failover);
4288 free_vqs:
4289         virtio_reset_device(vdev);
4290         cancel_delayed_work_sync(&vi->refill);
4291         free_receive_page_frags(vi);
4292         virtnet_del_vqs(vi);
4293 free:
4294         free_netdev(dev);
4295         return err;
4296 }
4297
4298 static void remove_vq_common(struct virtnet_info *vi)
4299 {
4300         virtio_reset_device(vi->vdev);
4301
4302         /* Free unused buffers in both send and recv, if any. */
4303         free_unused_bufs(vi);
4304
4305         free_receive_bufs(vi);
4306
4307         free_receive_page_frags(vi);
4308
4309         virtnet_del_vqs(vi);
4310 }
4311
static void virtnet_remove(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        virtnet_cpu_notif_remove(vi);

        /* Make sure no work handler is accessing the device. */
        flush_work(&vi->config_work);

        unregister_netdev(vi->dev);

        net_failover_destroy(vi->failover);

        remove_vq_common(vi);

        free_netdev(vi->dev);
}

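/* Suspend: tear the virtqueues down completely; they are rebuilt from
 * scratch in virtnet_restore(). Marked __maybe_unused because freeze and
 * restore are only wired up under CONFIG_PM_SLEEP.
 */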
static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        virtnet_cpu_notif_remove(vi);
        virtnet_freeze_down(vdev);
        remove_vq_common(vi);

        return 0;
}

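/* Resume: re-create the virtqueues, restore the queue-pair count, and
 * re-register the CPU notifier; unwind back to the frozen state on failure.
 */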
static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;
        int err;

        err = virtnet_restore_up(vdev);
        if (err)
                return err;
        virtnet_set_queues(vi, vi->curr_queue_pairs);

        err = virtnet_cpu_notif_add(vi);
        if (err) {
                virtnet_freeze_down(vdev);
                remove_vq_common(vi);
                return err;
        }

        return 0;
}

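/* Bind to any virtio device whose device ID is VIRTIO_ID_NET. */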
static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
        { 0 },
};

#define VIRTNET_FEATURES \
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
        VIRTIO_NET_F_MAC, \
        VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
        VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
        VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
        VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
        VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
        VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
        VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
        VIRTIO_NET_F_CTRL_MAC_ADDR, \
        VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
        VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
        VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
        VIRTIO_NET_F_GUEST_HDRLEN

static unsigned int features[] = {
        VIRTNET_FEATURES,
};

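/* Legacy (pre-virtio-1.0) devices may additionally negotiate the deprecated
 * VIRTIO_NET_F_GSO bit and VIRTIO_F_ANY_LAYOUT; virtio 1.0 devices assume
 * any-layout behaviour and no longer expose these bits.
 */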
static unsigned int features_legacy[] = {
        VIRTNET_FEATURES,
        VIRTIO_NET_F_GSO,
        VIRTIO_F_ANY_LAYOUT,
};

static struct virtio_driver virtio_net_driver = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .feature_table_legacy = features_legacy,
        .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
        .driver.name =  KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table =     id_table,
        .validate =     virtnet_validate,
        .probe =        virtnet_probe,
        .remove =       virtnet_remove,
        .config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze =       virtnet_freeze,
        .restore =      virtnet_restore,
#endif
};

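/* Module init: register two CPU-hotplug states (a dynamically allocated
 * "online" state and the static CPUHP_VIRT_NET_DEAD state) before the
 * driver itself, unwinding in reverse order if any step fails.
 */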
static __init int virtio_net_driver_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
                                      virtnet_cpu_online,
                                      virtnet_cpu_down_prep);
        if (ret < 0)
                goto out;
        virtionet_online = ret;
        ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
                                      NULL, virtnet_cpu_dead);
        if (ret)
                goto err_dead;
        ret = register_virtio_driver(&virtio_net_driver);
        if (ret)
                goto err_virtio;
        return 0;
err_virtio:
        cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
err_dead:
        cpuhp_remove_multi_state(virtionet_online);
out:
        return ret;
}
module_init(virtio_net_driver_init);

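/* Mirror of init: unregister the driver first, then remove both hotplug
 * states.
 */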
static __exit void virtio_net_driver_exit(void)
{
        unregister_virtio_driver(&virtio_net_driver);
        cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
        cpuhp_remove_multi_state(virtionet_online);
}
module_exit(virtio_net_driver_exit);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");