 }

 /* Called from bottom half context */
-static struct sk_buff *page_to_skb(struct receive_queue *rq,
+static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+                                   struct receive_queue *rq,
                                    struct page *page, unsigned int offset,
                                    unsigned int len, unsigned int truesize)
 {
-        struct virtnet_info *vi = rq->vq->vdev->priv;
         struct sk_buff *skb;
         struct skb_vnet_hdr *hdr;
         unsigned int copy, hdr_len, hdr_padded_len;
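
The line removed above is the pattern this whole patch targets: every
per-packet helper re-derived the driver state by chasing three pointers.
The chain only works because the driver stores its private struct in the
virtio device at probe time (vdev->priv = vi). A simplified sketch of the
relationships, with field lists trimmed to the members that matter here;
rq_to_vi() is a hypothetical name used only to illustrate the lookup the
callees used to repeat:

struct virtqueue {
        struct virtio_device *vdev;     /* device this ring belongs to */
        /* ... */
};

struct virtio_device {
        void *priv;                     /* driver data: the virtnet_info */
        /* ... */
};

struct receive_queue {
        struct virtqueue *vq;           /* rx ring backing this queue */
        /* ... */
};

/* Hypothetical helper, for illustration only: the driver open-codes it. */
static inline struct virtnet_info *rq_to_vi(struct receive_queue *rq)
{
        return rq->vq->vdev->priv;
}

After the change, that lookup happens once near the top of each call chain
and vi simply travels down through the signatures, which trims pointer
chasing on the hot receive path and makes the dependency explicit.
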
 }

 static struct sk_buff *receive_big(struct net_device *dev,
+                                   struct virtnet_info *vi,
                                    struct receive_queue *rq,
                                    void *buf,
                                    unsigned int len)
 {
         struct page *page = buf;
-        struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
+        struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);

         if (unlikely(!skb))
                 goto err;
         int offset = buf - page_address(page);
         unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
-        struct sk_buff *head_skb = page_to_skb(rq, page, offset, len, truesize);
+        struct sk_buff *head_skb = page_to_skb(vi, rq, page, offset, len,
+                                               truesize);
         struct sk_buff *curr_skb = head_skb;

         if (unlikely(!curr_skb))
                 return NULL;
 }
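
A note on the ctx value used above: for mergeable buffers the driver does
not pass a plain pointer around. Receive buffers are aligned, so the low
bits of the buffer address are free to carry the buffer's truesize in
alignment-sized units, and small helpers split the two back apart, which
is why the context line above can recover a truesize from ctx. A sketch of
the encoding idea, close to but not copied from the driver's helpers of
this era:

#define MERGEABLE_BUFFER_ALIGN  256     /* minimum rx buffer alignment */

/* Pack an aligned buffer address and its truesize (a multiple of the
 * alignment, at most 256 units) into a single unsigned long. */
static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
{
        unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN;

        return (unsigned long)buf | (size - 1);
}

static void *mergeable_ctx_to_buf_address(unsigned long ctx)
{
        return (void *)(ctx & -MERGEABLE_BUFFER_ALIGN);
}

static unsigned int mergeable_ctx_to_buf_truesize(unsigned long ctx)
{
        return ((ctx & (MERGEABLE_BUFFER_ALIGN - 1)) + 1) *
               MERGEABLE_BUFFER_ALIGN;
}
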
-static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
+static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+                        void *buf, unsigned int len)
 {
-        struct virtnet_info *vi = rq->vq->vdev->priv;
         struct net_device *dev = vi->dev;
         struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
         struct sk_buff *skb;

         if (vi->mergeable_rx_bufs)
                 skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len);
         else if (vi->big_packets)
-                skb = receive_big(dev, rq, buf, len);
+                skb = receive_big(dev, vi, rq, buf, len);
         else
                 skb = receive_small(buf, len);

         dev_kfree_skb(skb);
 }
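
Note that receive_small() is the one receive path that does not grow a vi
argument. In small-buffer mode the driver posts complete skbs to the ring,
so the helper has nothing device-specific to do; roughly, as a sketch of
that era's helper (unchanged by this patch):

static struct sk_buff *receive_small(void *buf, unsigned int len)
{
        struct sk_buff *skb = buf;      /* small mode posts whole skbs */

        len -= sizeof(struct virtio_net_hdr);   /* drop the virtio header */
        skb_trim(skb, len);

        return skb;
}
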
-static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
+static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
+                             gfp_t gfp)
 {
-        struct virtnet_info *vi = rq->vq->vdev->priv;
         struct sk_buff *skb;
         struct skb_vnet_hdr *hdr;
         int err;
 /*
  * Returns false if we couldn't fill entirely (OOM).
  *
  * Normally run in the receive path, but can also be run from ndo_open
  * before we're receiving packets, or from refill_work which is
  * careful to disable receiving (using napi_disable).
  */
-static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
+static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
+                          gfp_t gfp)
 {
-        struct virtnet_info *vi = rq->vq->vdev->priv;
         int err;
         bool oom;
                 else if (vi->big_packets)
                         err = add_recvbuf_big(rq, gfp);
                 else
-                        err = add_recvbuf_small(rq, gfp);
+                        err = add_recvbuf_small(vi, rq, gfp);
                 oom = err == -ENOMEM;
                 if (err)
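
For context, the tail of try_fill_recv() (cut off by the hunk above) is
what gives the boolean return its meaning: buffers are posted in a loop
until the ring is full or an allocation fails, the host is kicked once for
the whole batch, and only an -ENOMEM failure makes the function return
false, which the callers below treat as "fall back to the delayed refill
worker". A sketch of that loop shape:

        do {
                /* dispatch to add_recvbuf_{mergeable,big,small}(), as in
                 * the hunk above */
                oom = err == -ENOMEM;
                if (err)
                        break;
        } while (rq->vq->num_free);     /* stop once the ring is full */
        virtqueue_kick(rq->vq);         /* notify the host once per batch */
        return !oom;
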
                 struct receive_queue *rq = &vi->rq[i];

                 napi_disable(&rq->napi);
-                still_empty = !try_fill_recv(rq, GFP_KERNEL);
+                still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
                 virtnet_napi_enable(rq);

                 /* In theory, this can happen: if we don't get any buffers in
                  * we will *never* try to fill again.
                  */
         while (received < budget &&
                (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
-                receive_buf(rq, buf, len);
+                receive_buf(vi, rq, buf, len);
                 received++;
         }

         if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
-                if (!try_fill_recv(rq, GFP_ATOMIC))
+                if (!try_fill_recv(vi, rq, GFP_ATOMIC))
                         schedule_delayed_work(&vi->refill, 0);
         }
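
The loop above is virtnet_receive(), and it shows where the hoisted lookup
now lives: vi is derived from rq once at the top of the poll path (that
line is unchanged context, so it does not show up in the hunk) and is then
threaded through receive_buf() and try_fill_recv(). A sketch of the whole
function as it looks after this patch:

static int virtnet_receive(struct receive_queue *rq, int budget)
{
        struct virtnet_info *vi = rq->vq->vdev->priv;   /* single lookup */
        unsigned int len, received = 0;
        void *buf;

        while (received < budget &&
               (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
                receive_buf(vi, rq, buf, len);
                received++;
        }

        /* Refill once at least half the ring has drained; on OOM, punt
         * to the refill workqueue instead of looping here. */
        if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
                if (!try_fill_recv(vi, rq, GFP_ATOMIC))
                        schedule_delayed_work(&vi->refill, 0);
        }

        return received;
}
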
         for (i = 0; i < vi->max_queue_pairs; i++) {
                 if (i < vi->curr_queue_pairs)
                         /* Make sure we have some buffers: if oom use wq. */
-                        if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+                        if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
                                 schedule_delayed_work(&vi->refill, 0);
                 virtnet_napi_enable(&vi->rq[i]);
         }
         /* Last of all, set up some receive buffers. */
         for (i = 0; i < vi->curr_queue_pairs; i++) {
-                try_fill_recv(&vi->rq[i], GFP_KERNEL);
+                try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);

                 /* If we didn't even get one input buffer, we're useless. */
                 if (vi->rq[i].vq->num_free ==
                     virtqueue_get_vring_size(vi->rq[i].vq)) {
         if (netif_running(vi->dev)) {
                 for (i = 0; i < vi->curr_queue_pairs; i++)
-                        if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+                        if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
                                 schedule_delayed_work(&vi->refill, 0);

                 for (i = 0; i < vi->max_queue_pairs; i++)