ice: Refactor VIRTCHNL_OP_CONFIG_VSI_QUEUES handling
author: Brett Creeley <brett.creeley@intel.com>
Tue, 2 Mar 2021 18:15:40 +0000 (10:15 -0800)
committer: Tony Nguyen <anthony.l.nguyen@intel.com>
Mon, 7 Jun 2021 15:59:01 +0000 (08:59 -0700)
Currently, when a VF requests queue configuration via
VIRTCHNL_OP_CONFIG_VSI_QUEUES the PF driver expects that this message
will only be called once and we always assume the queues being
configured start from 0. This is incorrect and is causing issues when
a VF tries to send this message for multiple queue blocks. Fix this by
using the queue_id specified in the virtchnl message and allowing for
individual Rx and/or Tx queues to be configured.

Also, reduce the duplicated for loops for configuring the queues by
moving all the logic into a single for loop.

Signed-off-by: Brett Creeley <brett.creeley@intel.com>
Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_lib.h
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c

index bd84c1f..135c4d9 100644 (file)
@@ -1681,6 +1681,33 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio)
        wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
 }
 
+/**
+ * ice_vsi_cfg_single_rxq - configure one Rx queue of a VSI
+ * @vsi: the VSI whose Rx queue is being configured
+ * @q_idx: index into vsi->rx_rings of the queue to configure
+ *
+ * NOTE(review): bounds check uses vsi->num_rxq here while the Tx
+ * variant checks vsi->alloc_txq — confirm the asymmetry is intended.
+ *
+ * Return: 0 on success, -EINVAL if @q_idx is out of range, otherwise
+ * the result of ice_vsi_cfg_rxq().
+ */
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
+{
+       if (q_idx >= vsi->num_rxq)
+               return -EINVAL;
+
+       return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_vsi_cfg_single_txq - configure one Tx queue of a VSI
+ * @vsi: the VSI whose Tx queue is being configured
+ * @tx_rings: Tx ring array to take the ring from (e.g. vsi->tx_rings)
+ * @q_idx: index into @tx_rings of the queue to configure
+ *
+ * A single-entry Tx queue group buffer is allocated, used for the
+ * ice_vsi_cfg_txq() call, and freed before returning.
+ *
+ * Return: 0 on success, -EINVAL if @q_idx is out of range or the ring
+ * is missing, -ENOMEM if the queue group buffer allocation fails,
+ * otherwise the result of ice_vsi_cfg_txq().
+ */
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx)
+{
+       struct ice_aqc_add_tx_qgrp *qg_buf;
+       int err;
+
+       if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
+               return -EINVAL;
+
+       qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
+       if (!qg_buf)
+               return -ENOMEM;
+
+       qg_buf->num_txqs = 1;
+
+       err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
+       kfree(qg_buf);
+       return err;
+}
+
 /**
  * ice_vsi_cfg_rxqs - Configure the VSI for Rx
  * @vsi: the VSI being configured
index 5ec857f..9bd619e 100644 (file)
@@ -12,6 +12,10 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf);
 
 void ice_update_eth_stats(struct ice_vsi *vsi);
 
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
+
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx);
+
 int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
 
 int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
index 677d29f..5c68f11 100644 (file)
@@ -3537,10 +3537,9 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
        struct virtchnl_vsi_queue_config_info *qci =
            (struct virtchnl_vsi_queue_config_info *)msg;
        struct virtchnl_queue_pair_info *qpi;
-       u16 num_rxq = 0, num_txq = 0;
        struct ice_pf *pf = vf->pf;
        struct ice_vsi *vsi;
-       int i;
+       int i, q_idx;
 
        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -3578,18 +3577,31 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
                        goto error_param;
                }
+
+               q_idx = qpi->rxq.queue_id;
+
+               /* make sure selected "q_idx" is in valid range of queues
+                * for selected "vsi"
+                */
+               if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
+                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                       goto error_param;
+               }
+
                /* copy Tx queue info from VF into VSI */
                if (qpi->txq.ring_len > 0) {
-                       num_txq++;
                        vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
                        vsi->tx_rings[i]->count = qpi->txq.ring_len;
+                       if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
+                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                               goto error_param;
+                       }
                }
 
                /* copy Rx queue info from VF into VSI */
                if (qpi->rxq.ring_len > 0) {
                        u16 max_frame_size = ice_vc_get_max_frame_size(vf);
 
-                       num_rxq++;
                        vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
                        vsi->rx_rings[i]->count = qpi->rxq.ring_len;
 
@@ -3606,27 +3618,20 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
                                goto error_param;
                        }
-               }
 
-               vsi->max_frame = qpi->rxq.max_pkt_size;
-               /* add space for the port VLAN since the VF driver is not
-                * expected to account for it in the MTU calculation
-                */
-               if (vf->port_vlan_info)
-                       vsi->max_frame += VLAN_HLEN;
-       }
-
-       /* VF can request to configure less than allocated queues or default
-        * allocated queues. So update the VSI with new number
-        */
-       vsi->num_txq = num_txq;
-       vsi->num_rxq = num_rxq;
-       /* All queues of VF VSI are in TC 0 */
-       vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
-       vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
+                       vsi->max_frame = qpi->rxq.max_pkt_size;
+                       /* add space for the port VLAN since the VF driver is not
+                        * expected to account for it in the MTU calculation
+                        */
+                       if (vf->port_vlan_info)
+                               vsi->max_frame += VLAN_HLEN;
 
-       if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
-               v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+                       if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
+                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                               goto error_param;
+                       }
+               }
+       }
 
 error_param:
        /* send the response to the VF */