struct netpoll_info {
atomic_t refcnt;
- int rx_flags;
+ unsigned long rx_flags;
spinlock_t rx_lock;
+ struct mutex dev_lock; /* held across dev_open/close to block netpoll rx servicing */
struct list_head rx_np; /* netpolls that registered an rx_hook */
struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
struct rcu_head rcu;
};
+#ifdef CONFIG_NETPOLL
+extern int netpoll_rx_disable(struct net_device *dev);
+extern void netpoll_rx_enable(struct net_device *dev);
+#else
+static inline int netpoll_rx_disable(struct net_device *dev) { return 0; }
+static inline void netpoll_rx_enable(struct net_device *dev) { return; }
+#endif
+
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
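The two calls above are meant to bracket any process-context path that changes device state while netpoll may be servicing the rx side; with !CONFIG_NETPOLL the stubs compile to no-ops so such callers are unchanged. A minimal sketch of a hypothetical caller (example_reconfigure() and its body are illustrative only, not part of this patch):

	/* Hypothetical caller: fence off netpoll around a device state change.
	 * netpoll_rx_disable() may sleep, so this must run in process context.
	 */
	static int example_reconfigure(struct net_device *dev)
	{
		int err;

		err = netpoll_rx_disable(dev);	/* netpoll rx servicing is now blocked */
		if (err)
			return err;

		/* ... change device state, as the dev_open/close paths below do ... */

		netpoll_rx_enable(dev);		/* netpoll may poll the device again */
		return 0;
	}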
if (!netif_device_present(dev))
return -ENODEV;
+ /* Block netpoll from trying to do any rx path servicing.
+ * If we don't do this, there is a chance ndo_poll_controller
+ * or the napi poll handler may be running while we open the device.
+ */
+ ret = netpoll_rx_disable(dev);
+ if (ret)
+ return ret;
+
ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
ret = notifier_to_errno(ret);
if (ret)
if (!ret && ops->ndo_open)
ret = ops->ndo_open(dev);
+ netpoll_rx_enable(dev);
+
if (ret)
clear_bit(__LINK_STATE_START, &dev->state);
else {
int retval;
LIST_HEAD(single);
+ /* Temporarily disable netpoll until the interface is down */
+ retval = netpoll_rx_disable(dev);
+ if (retval)
+ return retval;
+
list_add(&dev->unreg_list, &single);
retval = __dev_close_many(&single);
list_del(&single);
+
+ netpoll_rx_enable(dev);
return retval;
}
*/
int dev_close(struct net_device *dev)
{
+ int ret = 0;
if (dev->flags & IFF_UP) {
LIST_HEAD(single);
+ /* Block netpoll rx while the interface is going down */
+ ret = netpoll_rx_disable(dev);
+ if (ret)
+ return ret;
+
list_add(&dev->unreg_list, &single);
dev_close_many(&single);
list_del(&single);
+
+ netpoll_rx_enable(dev);
}
- return 0;
+ return ret;
}
EXPORT_SYMBOL(dev_close);
static atomic_t trapped;
+static struct srcu_struct netpoll_srcu;
+
#define USEC_PER_POLL 50
#define NETPOLL_RX_ENABLED 1
#define NETPOLL_RX_DROP 2
const struct net_device_ops *ops;
struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
+ /* Don't do any rx activity if the dev_lock mutex is held;
+ * the dev_open/close paths use this to block netpoll activity
+ * while changing device state.
+ */
+ if (!mutex_trylock(&dev->npinfo->dev_lock))
+ return;
+
if (!dev || !netif_running(dev))
return;
poll_napi(dev);
+ mutex_unlock(&dev->npinfo->dev_lock);
+
if (dev->flags & IFF_SLAVE) {
if (ni) {
struct net_device *bond_dev;
zap_completion_queue();
}
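The mutex_trylock() above is used because netpoll_poll_dev() can run in contexts that must not sleep (for example the netpoll transmit path with interrupts disabled), so it cannot block on dev_lock; if dev_open/dev_close currently holds the mutex, the poller simply skips this round. A sketch of that idiom, with the netpoll_info NULL check and the unlock-on-exit made explicit (example_poll() is illustrative only and assumes the caller holds rcu_read_lock_bh(), as the netpoll callers do):

	static void example_poll(struct net_device *dev)
	{
		struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);

		if (!ni || !mutex_trylock(&ni->dev_lock))
			return;			/* device is opening/closing; back off */

		/* ... ndo_poll_controller() and napi servicing would go here ... */

		mutex_unlock(&ni->dev_lock);	/* always drop the lock on the way out */
	}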
+int netpoll_rx_disable(struct net_device *dev)
+{
+ struct netpoll_info *ni;
+ int idx;
+ might_sleep();
+ idx = srcu_read_lock(&netpoll_srcu);
+ ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
+ if (ni)
+ mutex_lock(&ni->dev_lock);
+ srcu_read_unlock(&netpoll_srcu, idx);
+ return 0;
+}
+EXPORT_SYMBOL(netpoll_rx_disable);
+
+void netpoll_rx_enable(struct net_device *dev)
+{
+ struct netpoll_info *ni;
+ rcu_read_lock();
+ ni = rcu_dereference(dev->npinfo);
+ if (ni)
+ mutex_unlock(&ni->dev_lock);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(netpoll_rx_enable);
+
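SRCU is used for netpoll_rx_disable() instead of plain RCU because its read-side section calls mutex_lock(), which may sleep; sleeping is forbidden under rcu_read_lock() but allowed under srcu_read_lock(). netpoll_rx_enable() only calls mutex_unlock(), which never sleeps, so plain RCU is enough there. A minimal reader-side sketch of the pattern, using hypothetical names (my_srcu, my_ptr, struct my_data):

	#include <linux/mutex.h>
	#include <linux/srcu.h>

	struct my_data {
		struct mutex lock;
	};

	static struct srcu_struct my_srcu;	/* init_srcu_struct(&my_srcu) at init time */
	static struct my_data __rcu *my_ptr;

	static void my_reader(void)
	{
		struct my_data *p;
		int idx;

		idx = srcu_read_lock(&my_srcu);
		p = srcu_dereference(my_ptr, &my_srcu);
		if (p) {
			mutex_lock(&p->lock);	/* sleeping is legal inside an SRCU read section */
			/* ... */
			mutex_unlock(&p->lock);
		}
		srcu_read_unlock(&my_srcu, idx);
	}

Unlike this sketch, netpoll_rx_disable() intentionally returns with dev_lock still held; the matching unlock happens later in netpoll_rx_enable().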
static void refill_skbs(void)
{
struct sk_buff *skb;
INIT_LIST_HEAD(&npinfo->rx_np);
spin_lock_init(&npinfo->rx_lock);
+ mutex_init(&npinfo->dev_lock);
skb_queue_head_init(&npinfo->neigh_tx);
skb_queue_head_init(&npinfo->txq);
INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
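Because netpoll_rx_disable() may lock dev_lock as soon as npinfo is visible, the mutex has to be initialized before the structure is published. A minimal sketch of that ordering, assuming the usual allocate/initialize/publish sequence (names and error handling abbreviated):

	npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
	if (!npinfo)
		return -ENOMEM;

	mutex_init(&npinfo->dev_lock);		/* usable before anyone can see npinfo */
	/* ... init rx_np, rx_lock, neigh_tx, txq, tx_work as in the hunk above ... */

	rcu_assign_pointer(dev->npinfo, npinfo); /* publish only after initialization */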
static int __init netpoll_init(void)
{
skb_queue_head_init(&skb_pool);
+ init_srcu_struct(&netpoll_srcu);
return 0;
}
core_initcall(netpoll_init);
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
+ synchronize_srcu(&netpoll_srcu);
+
if (atomic_dec_and_test(&npinfo->refcnt)) {
const struct net_device_ops *ops;
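The synchronize_srcu() added above pairs with the SRCU read-side section in netpoll_rx_disable(): it is placed before the reference count is dropped so that, by the time npinfo can go away, no CPU can still be dereferencing it or sleeping on its dev_lock from that path. Continuing the hypothetical my_srcu/my_ptr example from earlier, the updater side of the same pattern looks like:

	static void my_updater(struct my_data *old)
	{
		RCU_INIT_POINTER(my_ptr, NULL);	/* unpublish the object */
		synchronize_srcu(&my_srcu);	/* wait for every SRCU reader of the old value */
		kfree(old);			/* nothing can still be inside a read section on it */
	}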