Merge branch 'linus' into irq/core, to fix conflict
author    Ingo Molnar <mingo@kernel.org>
          Sat, 8 Jan 2022 09:53:57 +0000 (10:53 +0100)
committer Ingo Molnar <mingo@kernel.org>
          Sat, 8 Jan 2022 09:53:57 +0000 (10:53 +0100)
Conflicts:
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
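
The conflict in pci_irq.c comes from the irq/core branch converting drivers
away from irq_set_affinity_hint() while 'linus' carried mlx5 fixes touching
the same lines. For reference, a minimal sketch of the API split the
conversion uses (illustrative fragment, not part of the diff):

#include <linux/interrupt.h>
#include <linux/cpumask.h>

static void example_setup_vector(unsigned int irq, const struct cpumask *mask)
{
	/* Deprecated: sets the hint and also applies it as the affinity. */
	irq_set_affinity_hint(irq, mask);

	/* New: publishes only /proc/irq/<n>/affinity_hint; the effective
	 * affinity is left alone (used at the plain hint sites below).
	 */
	irq_update_affinity_hint(irq, mask);

	/* New: publishes the hint and applies the affinity; the direct
	 * replacement where enforcement is wanted (the mlx5 sites below).
	 */
	irq_set_affinity_and_hint(irq, mask);
}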
drivers/infiniband/hw/irdma/hw.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c

@@@ -60,6 -60,8 +60,8 @@@ static void irdma_iwarp_ce_handler(stru
  {
        struct irdma_cq *cq = iwcq->back_cq;
  
+       if (!cq->user_mode)
+               cq->armed = false;
        if (cq->ibcq.comp_handler)
                cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
  }
@@@ -146,6 -148,7 +148,7 @@@ static void irdma_set_flush_fields(stru
                qp->flush_code = FLUSH_PROT_ERR;
                break;
        case IRDMA_AE_AMP_BAD_QP:
+       case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
                qp->flush_code = FLUSH_LOC_QP_OP_ERR;
                break;
        case IRDMA_AE_AMP_BAD_STAG_KEY:
        case IRDMA_AE_PRIV_OPERATION_DENIED:
        case IRDMA_AE_IB_INVALID_REQUEST:
        case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
-       case IRDMA_AE_IB_REMOTE_OP_ERROR:
                qp->flush_code = FLUSH_REM_ACCESS_ERR;
                qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
                break;
        case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
                qp->flush_code = FLUSH_MW_BIND_ERR;
                break;
+       case IRDMA_AE_IB_REMOTE_OP_ERROR:
+               qp->flush_code = FLUSH_REM_OP_ERR;
+               break;
        default:
                qp->flush_code = FLUSH_FATAL_ERR;
                break;
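
Note the remap above: IRDMA_AE_IB_REMOTE_OP_ERROR moves out of the
access-error group so it flushes as a remote operation error. Flush codes
ultimately decide the work-completion status a consumer sees; a hedged
sketch of that translation (the mapping is an assumption about the irdma
CQ poll path, for illustration only):

#include <rdma/ib_verbs.h>

/* Illustrative flush_code -> ib_wc_status mapping; not part of this diff. */
static enum ib_wc_status example_flush_to_wc_status(int flush_code)
{
	switch (flush_code) {
	case FLUSH_REM_ACCESS_ERR:
		return IB_WC_REM_ACCESS_ERR;
	case FLUSH_REM_OP_ERR:	/* now used for IRDMA_AE_IB_REMOTE_OP_ERROR */
		return IB_WC_REM_OP_ERR;
	case FLUSH_LOC_QP_OP_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	default:
		return IB_WC_FATAL_ERR;
	}
}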
@@@ -545,7 -550,7 +550,7 @@@ static void irdma_destroy_irq(struct ir
        struct irdma_sc_dev *dev = &rf->sc_dev;
  
        dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
 -      irq_set_affinity_hint(msix_vec->irq, NULL);
 +      irq_update_affinity_hint(msix_vec->irq, NULL);
        free_irq(msix_vec->irq, dev_id);
  }
  
@@@ -1095,7 -1100,7 +1100,7 @@@ irdma_cfg_ceq_vector(struct irdma_pci_
        }
        cpumask_clear(&msix_vec->mask);
        cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
 -      irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);
 +      irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
        if (status) {
                ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
                return IRDMA_ERR_CFG;
@@@ -99,6 -99,24 +99,24 @@@ MODULE_LICENSE("GPL v2")
  
  static struct workqueue_struct *i40e_wq;
  
+ static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
+                                 struct net_device *netdev, int delta)
+ {
+       struct netdev_hw_addr *ha;
+
+       if (!f || !netdev)
+               return;
+
+       netdev_for_each_mc_addr(ha, netdev) {
+               if (ether_addr_equal(ha->addr, f->macaddr)) {
+                       ha->refcount += delta;
+                       if (ha->refcount <= 0)
+                               ha->refcount = 1;
+                       break;
+               }
+       }
+ }
+
  /**
   * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
   * @hw:   pointer to the HW structure
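
The new helper keeps the net core's per-address refcount from hitting zero
while an i40e filter still references the entry, so the address is not
released underneath the driver (the use-after-free this series fixes). A
sketch of the pairing, using the call sites added further down in this diff
(illustrative, not part of the change):

#include <linux/slab.h>

/* +1 when a filter wrapper is staged on tmp_add_list, -1 when the wrapper
 * is unwound or freed, so the balance returns to what the net core's
 * address sync originally established.
 */
static void example_stage(struct i40e_vsi *vsi, struct i40e_new_mac_filter *new)
{
	netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
}

static void example_unwind(struct i40e_vsi *vsi, struct i40e_new_mac_filter *new)
{
	netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
	kfree(new);
}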
@@@ -2036,6 -2054,7 +2054,7 @@@ static void i40e_undo_add_filter_entrie
        hlist_for_each_entry_safe(new, h, from, hlist) {
                /* We can simply free the wrapper structure */
                hlist_del(&new->hlist);
+               netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
                kfree(new);
        }
  }
@@@ -2383,6 -2402,10 +2402,10 @@@ int i40e_sync_vsi_filters(struct i40e_v
                                                       &tmp_add_list,
                                                       &tmp_del_list,
                                                       vlan_filters);
+               hlist_for_each_entry(new, &tmp_add_list, hlist)
+                       netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
                if (retval)
                        goto err_no_memory_locked;
  
                        if (new->f->state == I40E_FILTER_NEW)
                                new->f->state = new->state;
                        hlist_del(&new->hlist);
+                       netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
                        kfree(new);
                }
                spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@@ -3891,10 -3915,10 +3915,10 @@@ static int i40e_vsi_request_irq_msix(st
                 *
                 * get_cpu_mask returns a static constant mask with
                 * a permanent lifetime so it's ok to pass to
 -               * irq_set_affinity_hint without making a copy.
 +               * irq_update_affinity_hint without making a copy.
                 */
                cpu = cpumask_local_spread(q_vector->v_idx, -1);
 -              irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
 +              irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
        }
  
        vsi->irqs_ready = true;
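
The comment above is the reason no cpumask copy is needed: a sketch of the
idiom, assuming get_cpu_mask() still returns a pointer into the kernel's
static cpu_bit_bitmap table (illustrative):

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Spread per-vector hints across CPUs. get_cpu_mask() returns a static,
 * permanent mask, so it can be handed to irq_update_affinity_hint()
 * without copying.
 */
static void example_spread_hint(unsigned int irq_num, int v_idx)
{
	unsigned int cpu = cpumask_local_spread(v_idx, -1);

	irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
}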
@@@ -3905,7 -3929,7 +3929,7 @@@ free_queue_irqs
                vector--;
                irq_num = pf->msix_entries[base + vector].vector;
                irq_set_affinity_notifier(irq_num, NULL);
 -              irq_set_affinity_hint(irq_num, NULL);
 +              irq_update_affinity_hint(irq_num, NULL);
                free_irq(irq_num, &vsi->q_vectors[vector]);
        }
        return err;
@@@ -4726,7 -4750,7 +4750,7 @@@ static void i40e_vsi_free_irq(struct i4
                        /* clear the affinity notifier in the IRQ descriptor */
                        irq_set_affinity_notifier(irq_num, NULL);
                        /* remove our suggested affinity mask for this IRQ */
 -                      irq_set_affinity_hint(irq_num, NULL);
 +                      irq_update_affinity_hint(irq_num, NULL);
                        synchronize_irq(irq_num);
                        free_irq(irq_num, vsi->q_vectors[i]);
  
@@@ -8717,6 -8741,27 +8741,27 @@@ int i40e_open(struct net_device *netdev
  }
  
  /**
+  * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
+  * @vsi: vsi structure
+  *
+  * This updates netdev's number of tx/rx queues
+  *
+  * Returns status of setting tx/rx queues
+  **/
+ static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
+ {
+       int ret;
+
+       ret = netif_set_real_num_rx_queues(vsi->netdev,
+                                          vsi->num_queue_pairs);
+       if (ret)
+               return ret;
+
+       return netif_set_real_num_tx_queues(vsi->netdev,
+                                           vsi->num_queue_pairs);
+ }
+
+ /**
   * i40e_vsi_open -
   * @vsi: the VSI to open
   *
@@@ -8752,13 -8797,7 +8797,7 @@@ int i40e_vsi_open(struct i40e_vsi *vsi
                        goto err_setup_rx;
  
                /* Notify the stack of the actual queue counts. */
-               err = netif_set_real_num_tx_queues(vsi->netdev,
-                                                  vsi->num_queue_pairs);
-               if (err)
-                       goto err_set_queues;
-               err = netif_set_real_num_rx_queues(vsi->netdev,
-                                                  vsi->num_queue_pairs);
+               err = i40e_netif_set_realnum_tx_rx_queues(vsi);
                if (err)
                        goto err_set_queues;
  
@@@ -14151,6 -14190,9 +14190,9 @@@ struct i40e_vsi *i40e_vsi_setup(struct 
                ret = i40e_config_netdev(vsi);
                if (ret)
                        goto err_netdev;
+               ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
+               if (ret)
+                       goto err_netdev;
                ret = register_netdev(vsi->netdev);
                if (ret)
                        goto err_netdev;
@@@ -15451,8 -15493,8 +15493,8 @@@ static int i40e_probe(struct pci_dev *p
  
        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
            hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
-               dev_info(&pdev->dev,
-                        "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
+               dev_dbg(&pdev->dev,
+                       "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
                         hw->aq.api_maj_ver,
                         hw->aq.api_min_ver,
                         I40E_FW_API_VERSION_MAJOR,
@@@ -492,10 -492,10 +492,10 @@@ iavf_request_traffic_irqs(struct iavf_a
                irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
                /* Spread the IRQ affinity hints across online CPUs. Note that
                 * get_cpu_mask returns a mask with a permanent lifetime so
 -               * it's safe to use as a hint for irq_set_affinity_hint.
 +               * it's safe to use as a hint for irq_update_affinity_hint.
                 */
                cpu = cpumask_local_spread(q_vector->v_idx, -1);
 -              irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
 +              irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
        }
  
        return 0;
@@@ -505,7 -505,7 +505,7 @@@ free_queue_irqs
                vector--;
                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
                irq_set_affinity_notifier(irq_num, NULL);
 -              irq_set_affinity_hint(irq_num, NULL);
 +              irq_update_affinity_hint(irq_num, NULL);
                free_irq(irq_num, &adapter->q_vectors[vector]);
        }
        return err;
@@@ -557,7 -557,7 +557,7 @@@ static void iavf_free_traffic_irqs(stru
        for (vector = 0; vector < q_vectors; vector++) {
                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
                irq_set_affinity_notifier(irq_num, NULL);
 -              irq_set_affinity_hint(irq_num, NULL);
 +              irq_update_affinity_hint(irq_num, NULL);
                free_irq(irq_num, &adapter->q_vectors[vector]);
        }
  }
@@@ -2046,6 -2046,7 +2046,7 @@@ static void iavf_watchdog_task(struct w
                }
                adapter->aq_required = 0;
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+               mutex_unlock(&adapter->crit_lock);
                queue_delayed_work(iavf_wq,
                                   &adapter->watchdog_task,
                                   msecs_to_jiffies(10));
                        iavf_detect_recover_hung(&adapter->vsi);
                break;
        case __IAVF_REMOVE:
-               mutex_unlock(&adapter->crit_lock);
-               return;
        default:
+               mutex_unlock(&adapter->crit_lock);
                return;
        }
  
        /* check for hw reset */
        reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
        if (!reg_val) {
-               iavf_change_state(adapter, __IAVF_RESETTING);
                adapter->flags |= IAVF_FLAG_RESET_PENDING;
                adapter->aq_required = 0;
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
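
The hunk above closes a crit_lock leak: iavf_watchdog_task() takes the
mutex on entry, and the requeue-and-return path (and the default case)
previously returned without dropping it, deadlocking later acquirers. The
invariant, as a minimal sketch (hypothetical helper, not driver code):

#include <linux/mutex.h>
#include <linux/types.h>

/* Every exit path must drop the lock exactly once, whether the task
 * requeues itself or bails out early.
 */
static void example_watchdog(struct mutex *crit_lock, bool bail_early)
{
	mutex_lock(crit_lock);

	if (bail_early) {
		mutex_unlock(crit_lock);	/* previously missed on some paths */
		return;
	}

	/* ... normal watchdog work ... */
	mutex_unlock(crit_lock);
}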
@@@ -2248,6 -2247,7 +2247,7 @@@ static void iavf_reset_task(struct work
        }
  
        pci_set_master(adapter->pdev);
+       pci_restore_msi_state(adapter->pdev);
  
        if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
                dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
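
pci_restore_msi_state() is added because a PF-initiated reset can wipe the
VF's MSI/MSI-X configuration space; rewriting it from the state the PCI
core saved keeps interrupts flowing after the reset. The ordering, sketched
(illustrative):

#include <linux/pci.h>

/* Post-reset bring-up order, as in the hunk above: restore bus mastering
 * first, then rewrite the MSI/MSI-X registers from the saved state.
 */
static void example_post_reset(struct pci_dev *pdev)
{
	pci_set_master(pdev);
	pci_restore_msi_state(pdev);
}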
@@@ -2708,8 -2708,11 +2708,11 @@@ static int iavf_validate_ch_config(stru
                total_max_rate += tx_rate;
                num_qps += mqprio_qopt->qopt.count[i];
        }
-       if (num_qps > IAVF_MAX_REQ_QUEUES)
+       if (num_qps > adapter->num_active_queues) {
+               dev_err(&adapter->pdev->dev,
+                       "Cannot support requested number of queues\n");
                return -EINVAL;
+       }
  
        ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
        return ret;
@@@ -3247,8 -3247,8 +3247,8 @@@ static int ixgbe_request_msix_irqs(stru
                /* If Flow Director is enabled, set interrupt affinity */
                if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
                        /* assign the mask for this irq */
 -                      irq_set_affinity_hint(entry->vector,
 -                                            &q_vector->affinity_mask);
 +                      irq_update_affinity_hint(entry->vector,
 +                                               &q_vector->affinity_mask);
                }
        }
  
  free_queue_irqs:
        while (vector) {
                vector--;
 -              irq_set_affinity_hint(adapter->msix_entries[vector].vector,
 -                                    NULL);
 +              irq_update_affinity_hint(adapter->msix_entries[vector].vector,
 +                                       NULL);
                free_irq(adapter->msix_entries[vector].vector,
                         adapter->q_vector[vector]);
        }
@@@ -3398,7 -3398,7 +3398,7 @@@ static void ixgbe_free_irq(struct ixgbe
                        continue;
  
                /* clear the affinity_mask in the IRQ descriptor */
 -              irq_set_affinity_hint(entry->vector, NULL);
 +              irq_update_affinity_hint(entry->vector, NULL);
  
                free_irq(entry->vector, q_vector);
        }
@@@ -5531,6 -5531,10 +5531,10 @@@ static int ixgbe_non_sfp_link_config(st
        if (!speed && hw->mac.ops.get_link_capabilities) {
                ret = hw->mac.ops.get_link_capabilities(hw, &speed,
                                                        &autoneg);
+               /* remove NBASE-T speeds from default autonegotiation
+                * to accommodate broken network switches in the field
+                * which cannot cope with advertised NBASE-T speeds
+                */
                speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
                           IXGBE_LINK_SPEED_2_5GB_FULL);
        }
@@@ -143,11 -143,11 +143,11 @@@ static void irq_release(struct mlx5_ir
        struct mlx5_irq_pool *pool = irq->pool;
  
        xa_erase(&pool->irqs, irq->index);
 -      /* free_irq requires that affinity and rmap will be cleared
 +      /* free_irq requires that affinity_hint and rmap will be cleared
         * before calling it. This is why there is asymmetry with set_rmap
         * which should be called after alloc_irq but before request_irq.
         */
 -      irq_set_affinity_hint(irq->irqn, NULL);
 +      irq_update_affinity_hint(irq->irqn, NULL);
        free_cpumask_var(irq->mask);
        free_irq(irq->irqn, &irq->nh);
        kfree(irq);
@@@ -316,7 -316,7 +316,7 @@@ static struct mlx5_irq *irq_pool_create
        if (IS_ERR(irq))
                return irq;
        cpumask_copy(irq->mask, affinity);
 -      irq_set_affinity_hint(irq->irqn, irq->mask);
 +      irq_set_affinity_and_hint(irq->irqn, irq->mask);
        return irq;
  }
  
@@@ -356,8 -356,8 +356,8 @@@ static struct mlx5_irq *irq_pool_reques
        new_irq = irq_pool_create_irq(pool, affinity);
        if (IS_ERR(new_irq)) {
                if (!least_loaded_irq) {
-                       mlx5_core_err(pool->dev, "Didn't find IRQ for cpu = %u\n",
-                                     cpumask_first(affinity));
+                       mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
+                                     PTR_ERR(new_irq));
                        mutex_unlock(&pool->lock);
                        return new_irq;
                }
@@@ -398,8 -398,8 +398,8 @@@ irq_pool_request_vector(struct mlx5_irq
        cpumask_copy(irq->mask, affinity);
        if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max &&
            cpumask_empty(irq->mask))
-               cpumask_set_cpu(0, irq->mask);
+               cpumask_set_cpu(cpumask_first(cpu_online_mask), irq->mask);
 -      irq_set_affinity_hint(irq->irqn, irq->mask);
 +      irq_set_affinity_and_hint(irq->irqn, irq->mask);
  unlock:
        mutex_unlock(&pool->lock);
        return irq;
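
The cpumask_first(cpu_online_mask) change above accounts for configurations
where CPU 0 is not online (boot-CPU hotplug), so an empty requested mask
now falls back to a CPU that can actually take the interrupt. Sketched in
isolation (illustrative):

#include <linux/cpumask.h>

/* Fall back to the first online CPU instead of assuming CPU 0 exists. */
static void example_default_cpu(struct cpumask *mask)
{
	if (cpumask_empty(mask))
		cpumask_set_cpu(cpumask_first(cpu_online_mask), mask);
}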