Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue
author    Jakub Kicinski <kuba@kernel.org>  Tue, 16 Aug 2022 03:14:39 +0000 (20:14 -0700)
committer Jakub Kicinski <kuba@kernel.org>  Tue, 16 Aug 2022 03:14:39 +0000 (20:14 -0700)
Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2022-08-12 (iavf)

This series contains updates to the iavf driver only.

Przemyslaw frees memory for admin queues in initialization error paths,
prevents the freeing of vf_res, which was causing a NULL pointer
dereference, and adjusts calls in the reset error path to avoid
iavf_close(), which could cause a deadlock.

Ivan Vecera avoids a deadlock that can occur when the driver is part of
a failover configuration.
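
At a high level this is the usual AB-BA lock-ordering problem; the
userspace sketch below uses hypothetical locks (lock_a, lock_b) purely to
show the shape of such a deadlock, not the actual locks involved in iavf:

    /* Generic AB-BA deadlock: each thread holds one lock and waits
     * forever for the lock the other thread already holds.
     */
    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

    static void *path_one(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock_a);
            pthread_mutex_lock(&lock_b);    /* blocks if path_two holds lock_b */
            pthread_mutex_unlock(&lock_b);
            pthread_mutex_unlock(&lock_a);
            return NULL;
    }

    static void *path_two(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock_b);
            pthread_mutex_lock(&lock_a);    /* blocks if path_one holds lock_a */
            pthread_mutex_unlock(&lock_a);
            pthread_mutex_unlock(&lock_b);
            return NULL;
    }

    int main(void)
    {
            pthread_t t1, t2;

            pthread_create(&t1, NULL, path_one, NULL);
            pthread_create(&t2, NULL, path_two, NULL);
            pthread_join(t1, NULL);         /* with unlucky timing, never returns */
            pthread_join(t2, NULL);
            return 0;
    }

Taking the locks in one consistent order (or dropping one before taking
the other) is the standard way out of this class of deadlock.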

* '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  iavf: Fix deadlock in initialization
  iavf: Fix reset error handling
  iavf: Fix NULL pointer dereference in iavf_get_link_ksettings
  iavf: Fix adminq error handling
====================

Link: https://lore.kernel.org/r/20220812172309.853230-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
15 files changed:
drivers/net/dsa/mv88e6060.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_vf_lib.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
drivers/net/ethernet/moxa/moxart_ether.c
include/net/neighbour.h
net/core/neighbour.c
net/core/rtnetlink.c
net/ipv6/ip6_tunnel.c
net/ipv6/ndisc.c
net/qrtr/mhi.c
net/sched/cls_route.c

diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index a4c6eb9..83dca91 100644
@@ -118,6 +118,9 @@ static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p)
        int addr = REG_PORT(p);
        int ret;
 
+       if (dsa_is_unused_port(priv->ds, p))
+               return 0;
+
        /* Do not force flow control, disable Ingress and Egress
         * Header tagging, disable VLAN tunneling, and set the port
         * state to Forwarding.  Additionally, if this is the CPU
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 7d49c28..3dc3c0b 100644
@@ -135,11 +135,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
                 * NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds
                 * to current timer would be next second.
                 */
-               tempval = readl(fep->hwp + FEC_ATIME_CTRL);
-               tempval |= FEC_T_CTRL_CAPTURE;
-               writel(tempval, fep->hwp + FEC_ATIME_CTRL);
-
-               tempval = readl(fep->hwp + FEC_ATIME);
+               tempval = fep->cc.read(&fep->cc);
                /* Convert the ptp local counter to 1588 timestamp */
                ns = timecounter_cyc2time(&fep->tc, tempval);
                ts = ns_to_timespec64(ns);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a830f7f..0d4dbca 100644
@@ -3181,7 +3181,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
 
        pf = vsi->back;
        vtype = vsi->type;
-       if (WARN_ON(vtype == ICE_VSI_VF) && !vsi->vf)
+       if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf))
                return -EINVAL;
 
        ice_vsi_init_vlan_ops(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 8fd7c3e..76f70fe 100644
@@ -571,8 +571,10 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
 
        if (ice_is_vf_disabled(vf)) {
                vsi = ice_get_vf_vsi(vf);
-               if (WARN_ON(!vsi))
+               if (!vsi) {
+                       dev_dbg(dev, "VF is already removed\n");
                        return -EINVAL;
+               }
                ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
                ice_vsi_stop_all_rx_rings(vsi);
                dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 1e240cd..30c7b0e 100644
@@ -1897,9 +1897,9 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
 
        cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
        cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
-       mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
        mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
        unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+       mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
        mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
        mlxsw_sp->ports[local_port] = NULL;
        mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 2e0b704..7b01b9c 100644
@@ -46,6 +46,7 @@ struct mlxsw_sp2_ptp_state {
                                          * enabled.
                                          */
        struct hwtstamp_config config;
+       struct mutex lock; /* Protects 'config' and HW configuration. */
 };
 
 struct mlxsw_sp1_ptp_key {
@@ -1374,6 +1375,7 @@ struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
                goto err_ptp_traps_set;
 
        refcount_set(&ptp_state->ptp_port_enabled_ref, 0);
+       mutex_init(&ptp_state->lock);
        return &ptp_state->common;
 
 err_ptp_traps_set:
@@ -1388,6 +1390,7 @@ void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
 
        ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
 
+       mutex_destroy(&ptp_state->lock);
        mlxsw_sp_ptp_traps_unset(mlxsw_sp);
        kfree(ptp_state);
 }
@@ -1461,7 +1464,10 @@ int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
 
        ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
 
+       mutex_lock(&ptp_state->lock);
        *config = ptp_state->config;
+       mutex_unlock(&ptp_state->lock);
+
        return 0;
 }
 
@@ -1523,6 +1529,9 @@ mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
                return -EINVAL;
        }
 
+       if ((ing_types && !egr_types) || (!ing_types && egr_types))
+               return -EINVAL;
+
        *p_ing_types = ing_types;
        *p_egr_types = egr_types;
        return 0;
@@ -1574,8 +1583,6 @@ static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
        struct mlxsw_sp2_ptp_state *ptp_state;
        int err;
 
-       ASSERT_RTNL();
-
        ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
 
        if (refcount_inc_not_zero(&ptp_state->ptp_port_enabled_ref))
@@ -1597,8 +1604,6 @@ static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
        struct mlxsw_sp2_ptp_state *ptp_state;
        int err;
 
-       ASSERT_RTNL();
-
        ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
 
        if (!refcount_dec_and_test(&ptp_state->ptp_port_enabled_ref))
@@ -1618,16 +1623,20 @@ err_ptp_disable:
 int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
                               struct hwtstamp_config *config)
 {
+       struct mlxsw_sp2_ptp_state *ptp_state;
        enum hwtstamp_rx_filters rx_filter;
        struct hwtstamp_config new_config;
        u16 new_ing_types, new_egr_types;
        bool ptp_enabled;
        int err;
 
+       ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+       mutex_lock(&ptp_state->lock);
+
        err = mlxsw_sp2_ptp_get_message_types(config, &new_ing_types,
                                              &new_egr_types, &rx_filter);
        if (err)
-               return err;
+               goto err_get_message_types;
 
        new_config.flags = config->flags;
        new_config.tx_type = config->tx_type;
@@ -1640,11 +1649,11 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
                err = mlxsw_sp2_ptp_configure_port(mlxsw_sp_port, new_ing_types,
                                                   new_egr_types, new_config);
                if (err)
-                       return err;
+                       goto err_configure_port;
        } else if (!new_ing_types && !new_egr_types && ptp_enabled) {
                err = mlxsw_sp2_ptp_deconfigure_port(mlxsw_sp_port, new_config);
                if (err)
-                       return err;
+                       goto err_deconfigure_port;
        }
 
        mlxsw_sp_port->ptp.ing_types = new_ing_types;
@@ -1652,8 +1661,15 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
        /* Notify the ioctl caller what we are actually timestamping. */
        config->rx_filter = rx_filter;
+       mutex_unlock(&ptp_state->lock);
 
        return 0;
+
+err_deconfigure_port:
+err_configure_port:
+err_get_message_types:
+       mutex_unlock(&ptp_state->lock);
+       return err;
 }
 
 int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index 2d1628f..a8b8823 100644
@@ -171,10 +171,11 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
 {
 }
 
-int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
-                                struct mlxsw_sp_port *mlxsw_sp_port,
-                                struct sk_buff *skb,
-                                const struct mlxsw_tx_info *tx_info)
+static inline int
+mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+                            struct mlxsw_sp_port *mlxsw_sp_port,
+                            struct sk_buff *skb,
+                            const struct mlxsw_tx_info *tx_info)
 {
        return -EOPNOTSUPP;
 }
@@ -231,10 +232,11 @@ static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
        return mlxsw_sp_ptp_get_ts_info_noptp(info);
 }
 
-int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
-                                 struct mlxsw_sp_port *mlxsw_sp_port,
-                                 struct sk_buff *skb,
-                                 const struct mlxsw_tx_info *tx_info)
+static inline int
+mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+                             struct mlxsw_sp_port *mlxsw_sp_port,
+                             struct sk_buff *skb,
+                             const struct mlxsw_tx_info *tx_info)
 {
        return -EOPNOTSUPP;
 }
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index a3214a7..f11f1cb 100644
@@ -77,7 +77,7 @@ static void moxart_mac_free_memory(struct net_device *ndev)
        int i;
 
        for (i = 0; i < RX_DESC_NUM; i++)
-               dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
+               dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
                                 priv->rx_buf_size, DMA_FROM_DEVICE);
 
        if (priv->tx_desc_base)
@@ -147,11 +147,11 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
                       desc + RX_REG_OFFSET_DESC1);
 
                priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
-               priv->rx_mapping[i] = dma_map_single(&ndev->dev,
+               priv->rx_mapping[i] = dma_map_single(&priv->pdev->dev,
                                                     priv->rx_buf[i],
                                                     priv->rx_buf_size,
                                                     DMA_FROM_DEVICE);
-               if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
+               if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping[i]))
                        netdev_err(ndev, "DMA mapping error\n");
 
                moxart_desc_write(priv->rx_mapping[i],
@@ -240,7 +240,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
                if (len > RX_BUF_SIZE)
                        len = RX_BUF_SIZE;
 
-               dma_sync_single_for_cpu(&ndev->dev,
+               dma_sync_single_for_cpu(&priv->pdev->dev,
                                        priv->rx_mapping[rx_head],
                                        priv->rx_buf_size, DMA_FROM_DEVICE);
                skb = netdev_alloc_skb_ip_align(ndev, len);
@@ -294,7 +294,7 @@ static void moxart_tx_finished(struct net_device *ndev)
        unsigned int tx_tail = priv->tx_tail;
 
        while (tx_tail != tx_head) {
-               dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
+               dma_unmap_single(&priv->pdev->dev, priv->tx_mapping[tx_tail],
                                 priv->tx_len[tx_tail], DMA_TO_DEVICE);
 
                ndev->stats.tx_packets++;
@@ -358,9 +358,9 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
 
        len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
 
-       priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
+       priv->tx_mapping[tx_head] = dma_map_single(&priv->pdev->dev, skb->data,
                                                   len, DMA_TO_DEVICE);
-       if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
+       if (dma_mapping_error(&priv->pdev->dev, priv->tx_mapping[tx_head])) {
                netdev_err(ndev, "DMA mapping error\n");
                goto out_unlock;
        }
@@ -379,7 +379,7 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
                len = ETH_ZLEN;
        }
 
-       dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+       dma_sync_single_for_device(&priv->pdev->dev, priv->tx_mapping[tx_head],
                                   priv->tx_buf_size, DMA_TO_DEVICE);
 
        txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
@@ -493,7 +493,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
        priv->tx_buf_size = TX_BUF_SIZE;
        priv->rx_buf_size = RX_BUF_SIZE;
 
-       priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE *
+       priv->tx_desc_base = dma_alloc_coherent(p_dev, TX_REG_DESC_SIZE *
                                                TX_DESC_NUM, &priv->tx_base,
                                                GFP_DMA | GFP_KERNEL);
        if (!priv->tx_desc_base) {
@@ -501,7 +501,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
                goto init_fail;
        }
 
-       priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE *
+       priv->rx_desc_base = dma_alloc_coherent(p_dev, RX_REG_DESC_SIZE *
                                                RX_DESC_NUM, &priv->rx_base,
                                                GFP_DMA | GFP_KERNEL);
        if (!priv->rx_desc_base) {
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 9f0bab0..3827a6b 100644
@@ -83,6 +83,7 @@ struct neigh_parms {
        struct rcu_head rcu_head;
 
        int     reachable_time;
+       int     qlen;
        int     data[NEIGH_VAR_DATA_MAX];
        DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
 };
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 6a8c259..5b669eb 100644
@@ -307,14 +307,32 @@ static int neigh_del_timer(struct neighbour *n)
        return 0;
 }
 
-static void pneigh_queue_purge(struct sk_buff_head *list)
+static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
 {
+       unsigned long flags;
        struct sk_buff *skb;
 
-       while ((skb = skb_dequeue(list)) != NULL) {
-               dev_put(skb->dev);
-               kfree_skb(skb);
+       spin_lock_irqsave(&list->lock, flags);
+       skb = skb_peek(list);
+       while (skb != NULL) {
+               struct sk_buff *skb_next = skb_peek_next(skb, list);
+               struct net_device *dev = skb->dev;
+               if (net == NULL || net_eq(dev_net(dev), net)) {
+                       struct in_device *in_dev;
+
+                       rcu_read_lock();
+                       in_dev = __in_dev_get_rcu(dev);
+                       if (in_dev)
+                               in_dev->arp_parms->qlen--;
+                       rcu_read_unlock();
+                       __skb_unlink(skb, list);
+
+                       dev_put(dev);
+                       kfree_skb(skb);
+               }
+               skb = skb_next;
        }
+       spin_unlock_irqrestore(&list->lock, flags);
 }
 
 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
@@ -385,9 +403,9 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
        write_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev, skip_perm);
        pneigh_ifdown_and_unlock(tbl, dev);
-
-       del_timer_sync(&tbl->proxy_timer);
-       pneigh_queue_purge(&tbl->proxy_queue);
+       pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev));
+       if (skb_queue_empty_lockless(&tbl->proxy_queue))
+               del_timer_sync(&tbl->proxy_timer);
        return 0;
 }
 
@@ -1597,8 +1615,15 @@ static void neigh_proxy_process(struct timer_list *t)
 
                if (tdif <= 0) {
                        struct net_device *dev = skb->dev;
+                       struct in_device *in_dev;
 
+                       rcu_read_lock();
+                       in_dev = __in_dev_get_rcu(dev);
+                       if (in_dev)
+                               in_dev->arp_parms->qlen--;
+                       rcu_read_unlock();
                        __skb_unlink(skb, &tbl->proxy_queue);
+
                        if (tbl->proxy_redo && netif_running(dev)) {
                                rcu_read_lock();
                                tbl->proxy_redo(skb);
@@ -1623,7 +1648,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
        unsigned long sched_next = jiffies +
                        prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));
 
-       if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
+       if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
                kfree_skb(skb);
                return;
        }
@@ -1639,6 +1664,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
        skb_dst_drop(skb);
        dev_hold(skb->dev);
        __skb_queue_tail(&tbl->proxy_queue, skb);
+       p->qlen++;
        mod_timer(&tbl->proxy_timer, sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
 }
@@ -1671,6 +1697,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
                refcount_set(&p->refcnt, 1);
                p->reachable_time =
                                neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
+               p->qlen = 0;
                netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
                p->dev = dev;
                write_pnet(&p->net, net);
@@ -1736,6 +1763,7 @@ void neigh_table_init(int index, struct neigh_table *tbl)
        refcount_set(&tbl->parms.refcnt, 1);
        tbl->parms.reachable_time =
                          neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
+       tbl->parms.qlen = 0;
 
        tbl->stats = alloc_percpu(struct neigh_statistics);
        if (!tbl->stats)
@@ -1787,7 +1815,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
        cancel_delayed_work_sync(&tbl->managed_work);
        cancel_delayed_work_sync(&tbl->gc_work);
        del_timer_sync(&tbl->proxy_timer);
-       pneigh_queue_purge(&tbl->proxy_queue);
+       pneigh_queue_purge(&tbl->proxy_queue, NULL);
        neigh_ifdown(tbl, NULL);
        if (atomic_read(&tbl->entries))
                pr_crit("neighbour leakage\n");
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ac45328..4b5b15c 100644
@@ -6070,6 +6070,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
            !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
                NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
+               module_put(owner);
                goto err_unlock;
        }
 
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 3fda563..79c6a82 100644
@@ -1517,7 +1517,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
  *   ip6_tnl_change() updates the tunnel parameters
  **/
 
-static int
+static void
 ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
 {
        t->parms.laddr = p->laddr;
@@ -1531,29 +1531,25 @@ ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
        t->parms.fwmark = p->fwmark;
        dst_cache_reset(&t->dst_cache);
        ip6_tnl_link_config(t);
-       return 0;
 }
 
-static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
+static void ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
 {
        struct net *net = t->net;
        struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
-       int err;
 
        ip6_tnl_unlink(ip6n, t);
        synchronize_net();
-       err = ip6_tnl_change(t, p);
+       ip6_tnl_change(t, p);
        ip6_tnl_link(ip6n, t);
        netdev_state_change(t->dev);
-       return err;
 }
 
-static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
+static void ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
 {
        /* for default tnl0 device allow to change only the proto */
        t->parms.proto = p->proto;
        netdev_state_change(t->dev);
-       return 0;
 }
 
 static void
@@ -1667,9 +1663,9 @@ ip6_tnl_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
                        } else
                                t = netdev_priv(dev);
                        if (dev == ip6n->fb_tnl_dev)
-                               err = ip6_tnl0_update(t, &p1);
+                               ip6_tnl0_update(t, &p1);
                        else
-                               err = ip6_tnl_update(t, &p1);
+                               ip6_tnl_update(t, &p1);
                }
                if (!IS_ERR(t)) {
                        err = 0;
@@ -2091,7 +2087,8 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
        } else
                t = netdev_priv(dev);
 
-       return ip6_tnl_update(t, &p);
+       ip6_tnl_update(t, &p);
+       return 0;
 }
 
 static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 9845369..3a55349 100644
@@ -1378,6 +1378,9 @@ static void ndisc_router_discovery(struct sk_buff *skb)
        if (!rt && lifetime) {
                ND_PRINTK(3, info, "RA: adding default router\n");
 
+               if (neigh)
+                       neigh_release(neigh);
+
                rt = rt6_add_dflt_router(net, &ipv6_hdr(skb)->saddr,
                                         skb->dev, pref, defrtr_usr_metric);
                if (!rt) {
diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c
index 18196e1..9ced13c 100644
@@ -78,11 +78,6 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
        struct qrtr_mhi_dev *qdev;
        int rc;
 
-       /* start channels */
-       rc = mhi_prepare_for_transfer_autoqueue(mhi_dev);
-       if (rc)
-               return rc;
-
        qdev = devm_kzalloc(&mhi_dev->dev, sizeof(*qdev), GFP_KERNEL);
        if (!qdev)
                return -ENOMEM;
@@ -96,6 +91,13 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
        if (rc)
                return rc;
 
+       /* start channels */
+       rc = mhi_prepare_for_transfer_autoqueue(mhi_dev);
+       if (rc) {
+               qrtr_endpoint_unregister(&qdev->ep);
+               return rc;
+       }
+
        dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n");
 
        return 0;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 3f935cb..48712bc 100644
@@ -424,6 +424,11 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
                        return -EINVAL;
        }
 
+       if (!nhandle) {
+               NL_SET_ERR_MSG(extack, "Replacing with handle of 0 is invalid");
+               return -EINVAL;
+       }
+
        h1 = to_hash(nhandle);
        b = rtnl_dereference(head->table[h1]);
        if (!b) {
@@ -477,6 +482,11 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
        int err;
        bool new = true;
 
+       if (!handle) {
+               NL_SET_ERR_MSG(extack, "Creating with handle of 0 is invalid");
+               return -EINVAL;
+       }
+
        if (opt == NULL)
                return handle ? -EINVAL : 0;