ret = tcan4x5x_power_enable(priv->power, 1);
if (ret)
- goto out_clk;
+ goto out_m_can_class_free_dev;
- ret = tcan4x5x_parse_config(mcan_class);
+ ret = tcan4x5x_get_gpios(mcan_class);
if (ret)
goto out_power;
out_power:
tcan4x5x_power_enable(priv->power, 0);
- out_clk:
- if (!IS_ERR(mcan_class->cclk)) {
- clk_disable_unprepare(mcan_class->cclk);
- clk_disable_unprepare(mcan_class->hclk);
- }
out_m_can_class_free_dev:
m_can_class_free_dev(mcan_class->net);
- dev_err(&spi->dev, "Probe failed, err=%d\n", ret);
-
return ret;
}
return frames_processed;
}
- if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
+ if (!pending_scrq(adapter, rx_scrq))
break;
- next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
+ /* The queue entry at the current index is peeked at above
+ * to determine that there is a valid descriptor awaiting
+ * processing. We want to be sure that the current slot
+ * holds a valid descriptor before reading its contents.
+ */
+ dma_rmb();
+ next = ibmvnic_next_scrq(adapter, rx_scrq);
rx_buff =
(struct ibmvnic_rx_buff *)be64_to_cpu(next->
rx_comp.correlator);
irq_dispose_mapping(scrq->irq);
scrq->irq = 0;
}
- memset(scrq->msgs, 0, 4 * PAGE_SIZE);
- atomic_set(&scrq->used, 0);
- scrq->cur = 0;
- scrq->ind_buf.index = 0;
+
+ if (scrq->msgs) {
+ memset(scrq->msgs, 0, 4 * PAGE_SIZE);
+ atomic_set(&scrq->used, 0);
+ scrq->cur = 0;
+ scrq->ind_buf.index = 0;
+ } else {
+ netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
+ return -EINVAL;
+ }
rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
while (pending_scrq(adapter, scrq)) {
unsigned int pool = scrq->pool_index;
int num_entries = 0;
+ int total_bytes = 0;
+ int num_packets = 0;
+ /* The queue entry at the current index is peeked at above
+ * to determine that there is a valid descriptor awaiting
+ * processing. We want to be sure that the current slot
+ * holds a valid descriptor before reading its contents.
+ */
+ dma_rmb();
+
next = ibmvnic_next_scrq(adapter, scrq);
for (i = 0; i < next->tx_comp.num_comps; i++) {
- if (next->tx_comp.rcs[i]) {
+ if (next->tx_comp.rcs[i])
dev_err(dev, "tx error %x\n",
next->tx_comp.rcs[i]);
- continue;
- }
index = be32_to_cpu(next->tx_comp.correlators[i]);
if (index & IBMVNIC_TSO_POOL_MASK) {
tx_pool = &adapter->tso_pool[pool];
caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager);
caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
+ caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version);
- if (mlx5dr_matcher_supp_flex_parser_icmp_v4(caps)) {
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
}
#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
+ enum {
+ MLX5_STEERING_FORMAT_CONNECTX_5 = 0,
+ MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
+ };
+
struct mlx5_ifc_cmd_hca_cap_bits {
- u8 reserved_at_0[0x30];
+ u8 reserved_at_0[0x1f];
+ u8 vhca_resource_manager[0x1];
+
+ u8 reserved_at_20[0x3];
+ u8 event_on_vhca_state_teardown_request[0x1];
+ u8 event_on_vhca_state_in_use[0x1];
+ u8 event_on_vhca_state_active[0x1];
+ u8 event_on_vhca_state_allocated[0x1];
+ u8 event_on_vhca_state_invalid[0x1];
+ u8 reserved_at_28[0x8];
u8 vhca_id[0x10];
u8 reserved_at_40[0x40];