--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
-#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
return;
}
- use_mm(net->dev.mm);
mutex_lock(&vq->mutex);
vhost_disable_notify(vq);
}
mutex_unlock(&vq->mutex);
- unuse_mm(net->dev.mm);
}
static int peek_head_len(struct sock *sk)
if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
return;
- use_mm(net->dev.mm);
mutex_lock(&vq->mutex);
vhost_disable_notify(vq);
hdr_size = vq->vhost_hlen;
}
mutex_unlock(&vq->mutex);
- unuse_mm(net->dev.mm);
}
/* Expects to be always run from workqueue - which acts as
 * read-size critical section for our kind of RCU. */
if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
return;
- use_mm(net->dev.mm);
mutex_lock(&vq->mutex);
vhost_disable_notify(vq);
vhost_hlen = vq->vhost_hlen;
}
mutex_unlock(&vq->mutex);
- unuse_mm(net->dev.mm);
}
static void handle_rx(struct vhost_net *net)
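
The drivers/vhost/net.c hunks above all make the same change: handle_tx() and the two receive paths stop bracketing their work with use_mm(net->dev.mm)/unuse_mm(net->dev.mm), and <linux/mmu_context.h> drops out of the include list. Below is a minimal sketch of the resulting handler shape; it is illustrative only, elides the ring processing, and assumes field and constant names from the vhost-net driver of this era (net->dev.vqs, VHOST_NET_VQ_TX, vq->private_data) that are not all visible in the excerpt.

/* Sketch: shape of a vhost-net handler after the change. The worker
 * thread has already attached the owner's mm, so the handler only
 * takes the per-virtqueue lock and processes the ring. */
static void handle_tx(struct vhost_net *net)
{
        struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
        struct socket *sock = rcu_dereference(vq->private_data);

        if (!sock)
                return;

        mutex_lock(&vq->mutex);
        vhost_disable_notify(vq);

        /* ... fetch descriptors and hand packets to the socket ... */

        mutex_unlock(&vq->mutex);
        /* no unuse_mm() here any more - the mm stays with the worker */
}
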
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/mm.h>
+#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
struct vhost_work *work = NULL;
unsigned uninitialized_var(seq);
+ use_mm(dev->mm);
+
for (;;) {
/* mb paired w/ kthread_stop */
set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop()) {
spin_unlock_irq(&dev->work_lock);
__set_current_state(TASK_RUNNING);
- return 0;
+ break;
}
if (!list_empty(&dev->work_list)) {
work = list_first_entry(&dev->work_list,
schedule();
}
+ unuse_mm(dev->mm);
+ return 0;
}
/* Helper to allocate iovec buffers for all vqs. */
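
The drivers/vhost/vhost.c hunk is the other half of the move: vhost_worker() now calls use_mm(dev->mm) once when the worker kthread starts and unuse_mm(dev->mm) once when it is told to stop, which is why the early return 0 inside the loop becomes a break. A condensed sketch of that pattern follows; the flush/sequence bookkeeping of the real worker is elided, and work->fn/work->node are assumed from the surrounding driver rather than shown in the excerpt.

/* Sketch: per-device worker that borrows the owner's address space
 * for its whole lifetime instead of per work item. */
static int vhost_worker(void *data)
{
        struct vhost_dev *dev = data;
        struct vhost_work *work = NULL;

        use_mm(dev->mm);                /* attach once, at thread start */

        for (;;) {
                /* mb paired w/ kthread_stop */
                set_current_state(TASK_INTERRUPTIBLE);

                spin_lock_irq(&dev->work_lock);
                if (kthread_should_stop()) {
                        spin_unlock_irq(&dev->work_lock);
                        __set_current_state(TASK_RUNNING);
                        break;          /* fall through to unuse_mm() */
                }
                if (!list_empty(&dev->work_list)) {
                        work = list_first_entry(&dev->work_list,
                                                struct vhost_work, node);
                        list_del_init(&work->node);
                } else
                        work = NULL;
                spin_unlock_irq(&dev->work_lock);

                if (work) {
                        __set_current_state(TASK_RUNNING);
                        work->fn(work); /* runs handle_tx/handle_rx etc. */
                } else
                        schedule();
        }

        unuse_mm(dev->mm);              /* detach once, at thread exit */
        return 0;
}

Attaching the mm once per worker thread rather than on every tx/rx pass avoids repeated address-space switches on the hot path, which appears to be the point of the patch.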