/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

/* This function is used to set SKBFL_ZEROCOPY_ENABLE as well as
 * increasing the inflight counter. We need to increase the inflight
 * counter because the core driver calls into xenvif_zerocopy_callback
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_ENABLE;
	atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);

	/* Wake the dealloc thread _after_ decrementing inflight_packets so
	 * that if kthread_stop() has already been called, the dealloc thread
	 * does not wait forever with nothing to wake it.
	 */
	wake_up(&queue->dealloc_wq);
}

static int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
		!vif->disabled;
}

static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
{
	bool rc;

	rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
	if (rc)
		napi_schedule(&queue->napi);
	return rc;
}

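/* The interrupt handlers below use Xen's lateeoi irq model: a
 * pending-EOI flag is set before the ring is checked, and if no work
 * is found the flag is cleared again and the EOI is signalled
 * immediately with XEN_EOI_FLAG_SPURIOUS, letting Xen throttle a
 * frontend that raises events without posting any requests.
 */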
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;

	old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
	WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");

	if (!xenvif_handle_tx_interrupt(queue)) {
		atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue, we pretend there is nothing to do
	 * for this vif to deschedule it from NAPI. But this interface
	 * will be turned off in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* If the queue is rate-limited, it shall be
		 * rescheduled in the timer callback.
		 */
		if (likely(!queue->rate_limited))
			xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}

static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
{
	bool rc;

	rc = xenvif_have_rx_work(queue, false);
	if (rc)
		xenvif_kick_thread(queue);
	return rc;
}

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;

	old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
	WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");

	if (!xenvif_handle_rx_interrupt(queue)) {
		atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;
	bool has_rx, has_tx;

	old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
	WARN(old, "Interrupt while EOI pending\n");

	has_tx = xenvif_handle_tx_interrupt(queue);
	has_rx = xenvif_handle_rx_interrupt(queue);

	if (!has_rx && !has_tx) {
		atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

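/* Select a transmit queue for an skb. Without a configured hash
 * algorithm the regular netdev_pick_tx() policy applies; otherwise
 * the hash is computed via xenvif_set_skb_hash() and looked up in the
 * mapping table supplied by the frontend over the control ring.
 */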
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int size = vif->hash.size;
	unsigned int num_queues;

	/* If queues are not set up internally - always return 0
	 * as the packet is going to be dropped anyway.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		return 0;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return netdev_pick_tx(dev, skb, NULL) %
		       dev->real_num_tx_queues;

	xenvif_set_skb_hash(vif, skb);

	if (size == 0)
		return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

	return vif->hash.mapping[vif->hash.mapping_sel]
				[skb_get_hash_raw(skb) % size];
}

static netdev_tx_t
xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up.
	 * This handler should be called inside an RCU read section
	 * so we don't need to enter it here explicitly.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

	/* If there is no hash algorithm configured then make sure there
	 * is no hash information in the socket buffer otherwise it
	 * would be incorrectly forwarded to the frontend.
	 */
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		skb_clear_hash(skb);

	if (!xenvif_rx_queue_tail(queue, skb))
		goto drop;

	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u64 rx_bytes = 0;
	u64 rx_packets = 0;
	u64 tx_bytes = 0;
	u64 tx_packets = 0;
	unsigned int index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

	rcu_read_unlock();

	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}

static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		napi_disable(&queue->napi);
		del_timer_sync(&queue->credit_timeout);
	}
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
	 * a guest with the same MAX_SKB_FRAGS.
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	}
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues;
	int i;
	unsigned int queue_index;

	num_queues = READ_ONCE(vif->num_queues);

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;

		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			void *vif_stats = &vif->queues[queue_index].stats;
			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link	= ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_select_queue = xenvif_select_queue,
	.ndo_start_xmit	= xenvif_start_xmit,
	.ndo_get_stats	= xenvif_get_stats,
	.ndo_open	= xenvif_open,
	.ndo_stop	= xenvif_close,
	.ndo_change_mtu	= xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
};

struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	static const u8 dummy_addr[ETH_ALEN] = {
		0xfe, 0xff, 0xff, 0xff, 0xff, 0xff,
	};
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid  = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	vif->xdp_headroom = 0;

	spin_lock_init(&vif->lock);
	INIT_LIST_HEAD(&vif->fe_mcast_addr);

	dev->netdev_ops	= &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	eth_hw_addr_set(dev, dummy_addr);

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}

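/* Per-queue initialisation: tx credit state for rate limiting, the
 * internal rx/tx queues, the pending ring that tracks outstanding
 * guest tx requests, and the pages backing grant operations.
 */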
int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec  = 0UL;
	timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
	queue->credit_window_start = get_jiffies_64();

	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so you
	 * better enable it. The long term solution would be to use just a
	 * bunch of valid page descriptors, without dependency on ballooning.
	 */
	err = gnttab_alloc_pages(MAX_PENDING_REQS,
				 queue->mmap_pages);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc)
			{ { .callback = xenvif_zerocopy_callback },
			  { { .ctx = NULL,
			      .desc = i } } };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	return 0;
}

void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

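/* Connect the control ring: map the shared ring page, attach the back
 * ring, bind the control event channel as a lateeoi irq, and install
 * a threaded handler for control requests.
 */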
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn)
{
	struct net_device *dev = vif->dev;
	struct xenbus_device *xendev = xenvif_to_xenbus_device(vif);
	void *addr;
	struct xen_netif_ctrl_sring *shared;
	RING_IDX rsp_prod, req_prod;
	int err;

	err = xenbus_map_ring_valloc(xendev, &ring_ref, 1, &addr);
	if (err)
		goto err;

	shared = (struct xen_netif_ctrl_sring *)addr;
	rsp_prod = READ_ONCE(shared->rsp_prod);
	req_prod = READ_ONCE(shared->req_prod);

	BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE);

	err = -EIO;
	if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
		goto err_unmap;

	err = bind_interdomain_evtchn_to_irq_lateeoi(xendev, evtchn);
	if (err < 0)
		goto err_unmap;

	vif->ctrl_irq = err;

	xenvif_init_hash(vif);

	err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
				   IRQF_ONESHOT, "xen-netback-ctrl", vif);
	if (err) {
		pr_warn("Could not setup irq handler for %s\n", dev->name);
		goto err_deinit;
	}

	return 0;

err_deinit:
	xenvif_deinit_hash(vif);
	unbind_from_irqhandler(vif->ctrl_irq, vif);
	vif->ctrl_irq = 0;

err_unmap:
	xenbus_unmap_ring_vfree(xendev, vif->ctrl.sring);
	vif->ctrl.sring = NULL;

err:
	return err;
}

static void xenvif_disconnect_queue(struct xenvif_queue *queue)
{
	if (queue->task) {
		kthread_stop(queue->task);
		put_task_struct(queue->task);
		queue->task = NULL;
	}

	if (queue->dealloc_task) {
		kthread_stop(queue->dealloc_task);
		queue->dealloc_task = NULL;
	}

	if (queue->napi.poll) {
		netif_napi_del(&queue->napi);
		queue->napi.poll = NULL;
	}

	if (queue->tx_irq) {
		unbind_from_irqhandler(queue->tx_irq, queue);
		if (queue->tx_irq == queue->rx_irq)
			queue->rx_irq = 0;
		queue->tx_irq = 0;
	}

	if (queue->rx_irq) {
		unbind_from_irqhandler(queue->rx_irq, queue);
		queue->rx_irq = 0;
	}

	xenvif_unmap_frontend_data_rings(queue);
}

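/* Connect the data path for a single queue: map the tx/rx shared
 * rings, start the guest-rx and dealloc kthreads, then bind either a
 * single shared event channel or split tx/rx event channels, as
 * negotiated via feature-split-event-channels.
 */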
int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn)
{
	struct xenbus_device *dev = xenvif_to_xenbus_device(queue->vif);
	struct task_struct *task;
	int err;

	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
					     rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll);

	queue->stalled = true;

	task = kthread_run(xenvif_kthread_guest_rx, queue,
			   "%s-guest-rx", queue->name);
	if (IS_ERR(task))
		goto kthread_err;
	queue->task = task;
	/*
	 * Take a reference to the task in order to prevent it from being freed
	 * if the thread function returns before kthread_stop is called.
	 */
	get_task_struct(task);

	task = kthread_run(xenvif_dealloc_kthread, queue,
			   "%s-dealloc", queue->name);
	if (IS_ERR(task))
		goto kthread_err;
	queue->dealloc_task = task;

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			dev, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			dev, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			dev, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	return 0;

kthread_err:
	pr_warn("Could not allocate kthread for %s\n", queue->name);
	err = PTR_ERR(task);
err:
	xenvif_disconnect_queue(queue);
	return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}

void xenvif_disconnect_data(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		xenvif_disconnect_queue(queue);
	}

	xenvif_mcast_addr_list_free(vif);
}

void xenvif_disconnect_ctrl(struct xenvif *vif)
{
	if (vif->ctrl_irq) {
		xenvif_deinit_hash(vif);
		unbind_from_irqhandler(vif->ctrl_irq, vif);
		vif->ctrl_irq = 0;
	}

	if (vif->ctrl.sring) {
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->ctrl.sring);
		vif->ctrl.sring = NULL;
	}
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queues = vif->queues;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	unregister_netdev(vif->dev);
	free_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index)
		xenvif_deinit_queue(&queues[queue_index]);
	vfree(queues);

	module_put(THIS_MODULE);
}