return tun;
}
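
+/* Flush all skbs still pending on a queue's socket.  Both the receive
+ * queue and the error queue must be purged when the queue is detached,
+ * otherwise any skbs still sitting there are leaked.
+ */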
+static void tun_queue_purge(struct tun_file *tfile)
+{
+ skb_queue_purge(&tfile->sk.sk_receive_queue);
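+	/* skbs can land on sk_error_queue (via sock_queue_err_skb(), e.g.
+	 * software tx-timestamp completions); purge it as well so nothing
+	 * is leaked when the queue goes away
+	 */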
+ skb_queue_purge(&tfile->sk.sk_error_queue);
+}
+
static void __tun_detach(struct tun_file *tfile, bool clean)
{
struct tun_file *ntfile;
@@ ... @@ static void __tun_detach(struct tun_file *tfile, bool clean)
 		synchronize_net();
tun_flow_delete_by_queue(tun, tun->numqueues + 1);
-		/* Drop read queue */
-		skb_queue_purge(&tfile->sk.sk_receive_queue);
+		/* Drop read queue and any queued error skbs */
+		tun_queue_purge(tfile);
tun_set_real_num_queues(tun);
} else if (tfile->detached && clean) {
tun = tun_enable_queue(tfile);
@@ ... @@ static void tun_detach_all(struct net_device *dev)
 	for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
-		/* Drop read queue */
-		skb_queue_purge(&tfile->sk.sk_receive_queue);
+		/* Drop read queue and any queued error skbs */
+		tun_queue_purge(tfile);
sock_put(&tfile->sk);
}
list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
tun_enable_queue(tfile);
- skb_queue_purge(&tfile->sk.sk_receive_queue);
+ tun_queue_purge(tfile);
sock_put(&tfile->sk);
}
BUG_ON(tun->numdisabled != 0);