return;
if (__netif_tx_trylock(txq)) {
- free_old_xmit_skbs(sq, true);
+ do {
+ virtqueue_disable_cb(sq->vq);
+ free_old_xmit_skbs(sq, true);
+ } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
+
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ netif_tx_wake_queue(txq);
+
__netif_tx_unlock(txq);
}
-
- if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
- netif_tx_wake_queue(txq);
}
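The hunk above closes a race in virtnet_poll_cleantx(): free_old_xmit_skbs() now runs with callbacks disabled, and virtqueue_enable_cb_delayed() returns false when further buffers were used while callbacks were off, so the loop drains them instead of waiting for an interrupt that was suppressed. Moving the netif_tx_wake_queue() check inside the tx lock also keeps the wakeup from racing with a concurrent start_xmit() stopping the queue. Below is a minimal user-space sketch of the same disable/drain/re-arm pattern; the names (ring_disable_cb, ring_enable_cb, drain_completions) are invented stand-ins for the virtqueue API, not the driver's code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pending;      /* completions the "device" has posted */
static atomic_bool cb_enabled;  /* models the ring's event-suppression flag */

static void ring_disable_cb(void)
{
	atomic_store(&cb_enabled, false);
}

/* Like virtqueue_enable_cb_delayed(): re-arm notifications, but return
 * false if work arrived while they were off, so the caller re-polls. */
static bool ring_enable_cb(void)
{
	atomic_store(&cb_enabled, true);
	return atomic_load(&pending) == 0;
}

static void drain_completions(void)
{
	int n = atomic_exchange(&pending, 0);
	if (n)
		printf("freed %d completed buffers\n", n);
}

int main(void)
{
	bool raced = false;

	atomic_store(&pending, 3);       /* device completed 3 buffers */

	do {
		ring_disable_cb();
		drain_completions();
		if (!raced) {            /* simulate a completion racing in */
			atomic_store(&pending, 1);
			raced = true;
		}
	} while (!ring_enable_cb());     /* re-poll instead of losing it */

	return 0;
}

Without the loop, the simulated completion that lands after drain_completions() but before ring_enable_cb() would sit in the queue until the next, possibly much later, interrupt.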
static int virtnet_poll_tx(struct napi_struct *napi, int budget)
struct virtnet_info *vi = sq->vq->vdev->priv;
unsigned int index = vq2txq(sq->vq);
struct netdev_queue *txq;
+ int opaque;
+ bool done;
if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
	/* We don't need to enable cb for XDP */
	napi_complete_done(napi, 0);
	return 0;
}

txq = netdev_get_tx_queue(vi->dev, index);
__netif_tx_lock(txq, raw_smp_processor_id());
+ virtqueue_disable_cb(sq->vq);
free_old_xmit_skbs(sq, true);
- __netif_tx_unlock(txq);
-
- virtqueue_napi_complete(napi, sq->vq, 0);
if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
netif_tx_wake_queue(txq);
+ opaque = virtqueue_enable_cb_prepare(sq->vq);
+
+ done = napi_complete_done(napi, 0);
+
+ if (!done)
+ virtqueue_disable_cb(sq->vq);
+
+ __netif_tx_unlock(txq);
+
+ if (done) {
+ if (unlikely(virtqueue_poll(sq->vq, opaque))) {
+ if (napi_schedule_prep(napi)) {
+ __netif_tx_lock(txq, raw_smp_processor_id());
+ virtqueue_disable_cb(sq->vq);
+ __netif_tx_unlock(txq);
+ __napi_schedule(napi);
+ }
+ }
+ }
+
return 0;
}
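The reworked virtnet_poll_tx() keeps every virtqueue operation under the tx queue lock and uses the prepare/poll pair to avoid losing a completion: virtqueue_enable_cb_prepare() re-arms callbacks and returns an opaque snapshot of the ring state, napi_complete_done() then tries to finish the NAPI cycle, and if that succeeds virtqueue_poll() checks whether the device advanced past the snapshot in the meantime; if it did, NAPI is rescheduled, with callbacks disabled again under the lock, rather than the event being dropped. A rough user-space model of that snapshot-and-recheck idiom follows; used_idx and the helper names are illustrative stand-ins, not the virtio implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint used_idx;  /* advanced by the "device" */
static unsigned last_seen;    /* how far the driver has drained */

static void drain(void)
{
	last_seen = atomic_load(&used_idx);
}

/* Model of virtqueue_enable_cb_prepare(): re-arm callbacks and return
 * an opaque snapshot of where polling stopped. */
static unsigned cb_prepare(void)
{
	return last_seen;
}

/* Model of virtqueue_poll(): true if the device moved past the
 * snapshot, i.e. a completion arrived after we decided to stop. */
static bool cb_poll(unsigned opaque)
{
	return atomic_load(&used_idx) != opaque;
}

int main(void)
{
	unsigned opaque;

	atomic_store(&used_idx, 5);
	drain();                        /* free_old_xmit_skbs() */
	opaque = cb_prepare();          /* arm before completing NAPI */

	atomic_fetch_add(&used_idx, 1); /* completion races with us */

	if (cb_poll(opaque))            /* re-check: don't lose it */
		puts("reschedule NAPI");

	return 0;
}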
bool use_napi = sq->napi.weight;
/* Free up any pending old buffers before queueing new ones. */
- free_old_xmit_skbs(sq, false);
+ do {
+ if (use_napi)
+ virtqueue_disable_cb(sq->vq);
+
+ free_old_xmit_skbs(sq, false);

- if (use_napi && kick)
- virtqueue_enable_cb_delayed(sq->vq);
+ } while (use_napi && kick &&
+ unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
/* timestamp packet in software */
skb_tx_timestamp(skb);
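start_xmit() gets the same disable/drain/re-arm loop, with one difference: callbacks are re-enabled, and the loop retried, only when use_napi && kick. On the xmit_more path (kick == false) another transmit is about to follow and will drain the queue itself, and in the non-napi case tx completions are reaped opportunistically here rather than from a callback, so there is nothing to re-arm.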
virtnet_set_queues(vi, vi->curr_queue_pairs);
err = virtnet_cpu_notif_add(vi);
- if (err)
+ if (err) {
+ virtnet_freeze_down(vdev);
+ remove_vq_common(vi);
return err;
+ }
return 0;
}
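If virtnet_cpu_notif_add() fails after virtnet_restore_up() has already brought the device back up, virtnet_restore() now unwinds by mirroring the freeze path, virtnet_freeze_down() plus remove_vq_common(), instead of returning with live virtqueues that nothing will ever tear down. A generic sketch of the unwind idiom (setup_a/setup_b/undo_a are invented names, not driver functions):

#include <stdio.h>

static int  setup_a(void) { return 0; }   /* e.g. bring the device up */
static void undo_a(void)  { puts("undo_a"); }
static int  setup_b(void) { return -1; }  /* force the error path */

static int restore(void)
{
	int err;

	err = setup_a();
	if (err)
		return err;

	err = setup_b();
	if (err)
		goto err_undo_a;   /* unwind in reverse order of setup */

	return 0;

err_undo_a:
	undo_a();
	return err;
}

int main(void)
{
	printf("restore() = %d\n", restore());
	return 0;
}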