Here is a deadlock scenario (sketched in code after the list):
- netvsc_vf_up() schedules netvsc_notify_peers() work and returns.
- netvsc_vf_down() runs before netvsc_notify_peers() gets executed. As it
is called from the netdev notifier chain, we hold the rtnl lock when we
get here.
- we enter the while (atomic_read(&net_device_ctx->vf_use_cnt) != 0) loop
and wait until netvsc_notify_peers() drops vf_use_cnt.
- netvsc_notify_peers() starts on some other CPU, but its
netdev_notify_peers() call blocks on rtnl_lock().
- deadlock!
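In code, the race looks roughly like this (a condensed sketch of the
pre-patch paths; the busy-wait body is an assumption, only its
condition is quoted from the driver):

	/* netvsc_vf_up(): schedule the notification and return */
	atomic_inc(&net_device_ctx->vf_use_cnt);
	schedule_work(&net_device_ctx->gwrk.dwrk);

	/* netvsc_vf_down(): called from the notifier chain, rtnl held */
	while (atomic_read(&net_device_ctx->vf_use_cnt) != 0)
		udelay(50);	/* assumed wait body; spins forever */

	/* netvsc_notify_peers() worker, on some other CPU */
	netdev_notify_peers(gwrk->netdev);	/* takes rtnl_lock() -> blocks */
	atomic_dec(&gwrk->net_device_ctx->vf_use_cnt);	/* never reached */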
Instead of introducing additional synchronization, I suggest we drop
gwrk.dwrk completely and fire the NETDEV_NOTIFY_PEERS notifier directly.
As we are already acting under the rtnl lock, this is legitimate.
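The replacement is just a synchronous notifier call; the ASSERT_RTNL()
below is illustrative only (not part of the patch) and documents the
locking assumption that makes the direct call safe:

	ASSERT_RTNL();	/* netdev notifier context, rtnl already held */
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);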
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Acked-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-struct garp_wrk {
- struct work_struct dwrk;
- struct net_device *netdev;
- struct net_device_context *net_device_ctx;
-};
-
/* The context of the netvsc device */
struct net_device_context {
/* point back to our device context */
struct work_struct work;
u32 msg_enable; /* debug level */
- struct garp_wrk gwrk;
-
struct netvsc_stats __percpu *tx_stats;
struct netvsc_stats __percpu *rx_stats;
-static void netvsc_notify_peers(struct work_struct *wrk)
-{
- struct garp_wrk *gwrk;
-
- gwrk = container_of(wrk, struct garp_wrk, dwrk);
-
- netdev_notify_peers(gwrk->netdev);
-
- atomic_dec(&gwrk->net_device_ctx->vf_use_cnt);
-}
-
static struct net_device *get_netvsc_net_device(char *mac)
{
struct net_device *dev, *found = NULL;
- /*
- * Now notify peers. We are scheduling work to
- * notify peers; take a reference to prevent
- * the VF interface from vanishing.
- */
- atomic_inc(&net_device_ctx->vf_use_cnt);
- net_device_ctx->gwrk.netdev = vf_netdev;
- net_device_ctx->gwrk.net_device_ctx = net_device_ctx;
- schedule_work(&net_device_ctx->gwrk.dwrk);
+ /* Now notify peers through VF device. */
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);
netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
rndis_filter_close(netvsc_dev);
netif_carrier_on(ndev);
- /*
- * Notify peers.
- */
- atomic_inc(&net_device_ctx->vf_use_cnt);
- net_device_ctx->gwrk.netdev = ndev;
- net_device_ctx->gwrk.net_device_ctx = net_device_ctx;
- schedule_work(&net_device_ctx->gwrk.dwrk);
+
+ /* Now notify peers through netvsc device. */
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
INIT_WORK(&net_device_ctx->work, do_set_multicast);
- INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);