#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
#define I40EVF_QUEUE_STATS_LEN(_dev) \
- (((struct i40evf_adapter *) \
+ (((struct i40evf_adapter *)\
netdev_priv(_dev))->num_active_queues \
* 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
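/* For reference, the arithmetic above: each active queue contributes a
 * Tx and an Rx set of stats (the "* 2"), and sizeof(struct
 * i40e_queue_stats) / sizeof(u64) counts the u64 fields per set (two in
 * this era of the driver, for the packets/bytes pair), so four active
 * queues yield 4 * 2 * 2 = 16 per-queue stat entries.
 */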
#define I40EVF_STATS_LEN(_dev) \
	(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
static u32 i40evf_get_msglevel(struct net_device *netdev)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
+
return adapter->msg_enable;
}
static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
+
adapter->msg_enable = data;
}
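/* These two hooks back ethtool's message-level interface: for example,
 * "ethtool -s <iface> msglvl 0x7" lands in i40evf_set_msglevel(), and
 * plain "ethtool <iface>" reports the value read back through
 * i40evf_get_msglevel() as "Current message level".
 */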
* but the number of rings is not reported.
**/
static void i40evf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
* this functionality.
**/
static int i40evf_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_vsi *vsi = &adapter->vsi;
* Change current coalescing settings.
**/
static int i40evf_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_hw *hw = &adapter->hw;
static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
{
struct i40e_hw *hw = &adapter->hw;
+
wr32(hw, I40E_VFINT_DYN_CTL01, 0);
/* read flush */
static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
{
struct i40e_hw *hw = &adapter->hw;
+
wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
}
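/* The first write re-arms the miscellaneous (non-queue) vector via its
 * dynamic control register; the second unmasks the admin queue cause in
 * ICR0 so AQ completions can raise that interrupt.
 */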
/* read flush */
rd32(hw, I40E_VFGEN_RSTAT);
-
}
/**
* @adapter: board private structure
* @mask: bitmap of vectors to trigger
**/
-static void i40evf_fire_sw_int(struct i40evf_adapter *adapter,
- u32 mask)
+static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
{
struct i40e_hw *hw = &adapter->hw;
int i;
{
int i;
int q_vectors;
+
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (i = 0; i < q_vectors; i++) {
{
struct i40e_hw *hw = &adapter->hw;
int i;
+
for (i = 0; i < adapter->num_active_queues; i++)
adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i);
}
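/* After this loop each Tx ring's tail points at its queue's QTX_TAIL1
 * doorbell inside the memory-mapped BAR, so a later
 * writel(ring->next_to_use, ring->tail) bumps the hardware tail
 * directly.
 */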
* @vid: VLAN tag
**/
static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+ __always_unused __be16 proto, u16 vid)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
* @vid: VLAN tag
**/
static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+ __always_unused __be16 proto, u16 vid)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
struct napi_struct *napi;
+
q_vector = adapter->q_vector[q_idx];
napi = &q_vector->napi;
napi_enable(napi);
for (i = 0; i < adapter->num_active_queues; i++) {
struct i40e_ring *ring = adapter->rx_rings[i];
+
i40evf_alloc_rx_buffers(ring, ring->count);
ring->next_to_use = ring->count - 1;
writel(ring->next_to_use, ring->tail);
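/* Note the tail is left at count - 1 rather than count: keeping one
 * descriptor unposted is the usual way these rings distinguish a full
 * ring from an empty one, and the writel() publishes the new tail to
 * the doorbell mapped during ring setup.
 */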
struct i40e_ring *tx_ring;
struct i40e_ring *rx_ring;
- tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+ tx_ring = kzalloc(sizeof(*tx_ring) * 2, GFP_KERNEL);
if (!tx_ring)
goto err_out;
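/* One allocation covers the queue pair: the Rx ring is carved out of
 * the adjacent element of the same block (hence the "* 2" above), so
 * freeing the Tx ring pointer later releases both rings at once.
 */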
num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
- q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+ q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
q_vector->vsi = &adapter->vsi;
q_vector->v_idx = q_idx;
netif_napi_add(adapter->netdev, &q_vector->napi,
- i40evf_napi_poll, NAPI_POLL_WEIGHT);
+ i40evf_napi_poll, NAPI_POLL_WEIGHT);
adapter->q_vector[q_idx] = q_vector;
}
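/* Each MSI-X vector gets its own NAPI context here; NAPI_POLL_WEIGHT
 * (64) caps how much work i40evf_napi_poll() may do per poll before it
 * must yield the CPU.
 */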
}
dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
- (adapter->num_active_queues > 1) ? "Enabled" :
- "Disabled", adapter->num_active_queues);
+ (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
+ adapter->num_active_queues);
return 0;
err_alloc_queues:
static void i40evf_watchdog_timer(unsigned long data)
{
struct i40evf_adapter *adapter = (struct i40evf_adapter *)data;
+
schedule_work(&adapter->watchdog_task);
/* timer will be rescheduled in watchdog task */
}
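/* Timer callbacks run in atomic context, so this handler only kicks the
 * work item; anything that may sleep belongs in i40evf_watchdog_task()
 * below.
 */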
static void i40evf_watchdog_task(struct work_struct *work)
{
struct i40evf_adapter *adapter = container_of(work,
- struct i40evf_adapter,
- watchdog_task);
+ struct i40evf_adapter,
+ watchdog_task);
struct i40e_hw *hw = &adapter->hw;
uint32_t rstat_val;
/* check for reset */
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
- I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
(rstat_val != I40E_VFR_VFACTIVE) &&
(rstat_val != I40E_VFR_COMPLETED)) {
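/* VFGEN_RSTAT's state field tracks VF reset progress: if no reset is
 * already flagged and the state is neither VFR_VFACTIVE nor
 * VFR_COMPLETED, the PF has reset this VF and the watchdog must start
 * recovery.
 */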
/* kill and reinit the admin queue */
if (i40evf_shutdown_adminq(hw))
dev_warn(&adapter->pdev->dev,
- "%s: Failed to destroy the Admin Queue resources\n",
- __func__);
+ "%s: Failed to destroy the Admin Queue resources\n",
+ __func__);
err = i40evf_init_adminq(hw);
if (err)
dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n",
- __func__, err);
+ __func__, err);
adapter->aq_pending = 0;
adapter->aq_required = 0;
i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
v_msg->v_retval, event.msg_buf,
event.msg_len);
- if (pending != 0) {
+ if (pending != 0)
memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
- }
} while (pending);
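/* The loop drains every queued admin event in one pass; clearing
 * event.msg_buf between iterations keeps a shorter follow-up message
 * from being parsed against stale bytes of the previous one.
 */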
/* check for error indications */
for (i = 0; i < adapter->num_active_queues; i++)
if (adapter->tx_rings[i]->desc)
i40evf_free_tx_resources(adapter->tx_rings[i]);
-
}
/**
err = i40evf_check_reset_complete(hw);
if (err) {
dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
- err);
+ err);
goto err;
}
hw->aq.num_arq_entries = I40EVF_AQ_LEN;
err = i40evf_verify_api_ver(adapter);
if (err) {
dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n",
- err);
+ err);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
dev_info(&pdev->dev, "Resending request\n");
err = i40evf_send_api_ver(adapter);
static int __init i40evf_init_module(void)
{
int ret;
+
pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
- i40evf_driver_version);
+ i40evf_driver_version);
pr_info("%s\n", i40evf_copyright);
(count * sizeof(struct i40e_virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
- __func__);
+ __func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
sizeof(struct i40e_virtchnl_ether_addr);
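/* Clamping math: the usable payload is I40EVF_MAX_AQ_BUF_SIZE minus the
 * i40e_virtchnl_ether_addr_list header, and count becomes however many
 * i40e_virtchnl_ether_addr entries fit in it. The companion delete path
 * below and the two VLAN paths apply the same clamp with their own
 * entry sizes.
 */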
(count * sizeof(struct i40e_virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
- __func__);
+ __func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
sizeof(struct i40e_virtchnl_ether_addr);
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
- __func__);
+ __func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
sizeof(u16);
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
- __func__);
+ __func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
sizeof(u16);
void i40evf_request_stats(struct i40evf_adapter *adapter)
{
struct i40e_virtchnl_queue_select vqs;
+
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* no error message, this isn't crucial */
return;
"%s: Unknown event %d from pf\n",
__func__, vpe->event);
break;
-
}
return;
}
break;
default:
dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n",
- __func__, v_opcode);
+ __func__, v_opcode);
break;
} /* switch v_opcode */
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;