static int chcr_stats_show(struct seq_file *seq, void *v)
{
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+ struct ch_ktls_port_stats_debug *ktls_port;
+ int i = 0;
+#endif
struct adapter *adap = seq->private;
seq_puts(seq, "Chelsio Crypto Accelerator Stats \n");
seq_puts(seq, "\nChelsio KTLS Crypto Accelerator Stats\n");
seq_printf(seq, "Tx TLS offload refcount: %20u\n",
refcount_read(&adap->chcr_ktls.ktls_refcount));
- seq_printf(seq, "Tx HW offload contexts added: %20llu\n",
- atomic64_read(&adap->ch_ktls_stats.ktls_tx_ctx));
- seq_printf(seq, "Tx connection created: %20llu\n",
- atomic64_read(&adap->ch_ktls_stats.ktls_tx_connection_open));
- seq_printf(seq, "Tx connection failed: %20llu\n",
- atomic64_read(&adap->ch_ktls_stats.ktls_tx_connection_fail));
- seq_printf(seq, "Tx connection closed: %20llu\n",
- atomic64_read(&adap->ch_ktls_stats.ktls_tx_connection_close));
- seq_printf(seq, "Packets passed for encryption : %20llu\n",
- atomic64_read(&adap->ch_ktls_stats.ktls_tx_encrypted_packets));
- seq_printf(seq, "Bytes passed for encryption : %20llu\n",
- atomic64_read(&adap->ch_ktls_stats.ktls_tx_encrypted_bytes));
seq_printf(seq, "Tx records send: %20llu\n",
atomic64_read(&adap->ch_ktls_stats.ktls_tx_send_records));
seq_printf(seq, "Tx partial start of records: %20llu\n",
atomic64_read(&adap->ch_ktls_stats.ktls_tx_complete_pkts));
seq_printf(seq, "TX trim pkts : %20llu\n",
atomic64_read(&adap->ch_ktls_stats.ktls_tx_trimmed_pkts));
- seq_printf(seq, "Tx out of order packets: %20llu\n",
- atomic64_read(&adap->ch_ktls_stats.ktls_tx_ooo));
- seq_printf(seq, "Tx drop pkts before HW offload: %20llu\n",
- atomic64_read(&adap->ch_ktls_stats.ktls_tx_skip_no_sync_data));
- seq_printf(seq, "Tx drop not synced packets: %20llu\n",
- atomic64_read(&adap->ch_ktls_stats.ktls_tx_drop_no_sync_data));
- seq_printf(seq, "Tx drop bypass req: %20llu\n",
- atomic64_read(&adap->ch_ktls_stats.ktls_tx_drop_bypass_req));
+ while (i < MAX_NPORTS) {
+ ktls_port = &adap->ch_ktls_stats.ktls_port[i];
+ seq_printf(seq, "Port %d\n", i);
+ seq_printf(seq, "Tx connection created: %20llu\n",
+ atomic64_read(&ktls_port->ktls_tx_connection_open));
+ seq_printf(seq, "Tx connection failed: %20llu\n",
+ atomic64_read(&ktls_port->ktls_tx_connection_fail));
+ seq_printf(seq, "Tx connection closed: %20llu\n",
+ atomic64_read(&ktls_port->ktls_tx_connection_close));
+ i++;
+ }
#endif
return 0;
}
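
With the new loop, the ktls section of the debugfs file gains one block per port instead of a single adapter-wide set of connection counters. Assuming a 2-port adapter with activity only on port 0, the output would look roughly like this (values illustrative):

    Chelsio KTLS Crypto Accelerator Stats
    Tx TLS offload refcount:                    1
    Port 0
    Tx connection created:                      3
    Tx connection failed:                       0
    Tx connection closed:                       2
    Port 1
    Tx connection created:                      0
    Tx connection failed:                       0
    Tx connection closed:                       0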
"vlan_insertions ",
"gro_packets ",
"gro_merged ",
-};
-
-static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
- "db_drop ",
- "db_full ",
- "db_empty ",
- "write_coal_success ",
- "write_coal_fail ",
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
"tx_tls_encrypted_packets",
"tx_tls_encrypted_bytes ",
+ "tx_tls_ctx ",
+ "tx_tls_ooo ",
+ "tx_tls_skip_no_sync_data",
+ "tx_tls_drop_no_sync_data",
+ "tx_tls_drop_bypass_req ",
#endif
};
+static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
+ "db_drop ",
+ "db_full ",
+ "db_empty ",
+ "write_coal_success ",
+ "write_coal_fail ",
+};
+
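
These string tables are consumed positionally by ethtool, so the seven tls strings now ending the per-queue table must line up, index for index, with the u64 fields carried by struct queue_port_stats in the hunk below. A rough sketch of how get_stats() in this file walks the u64 array (paraphrased from the surrounding code, not part of this patch):

    /* each struct is overlaid on the array in string-table order, so a
     * string without a matching same-index field shifts every later value
     */
    collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
    data += sizeof(struct queue_port_stats) / sizeof(u64);
    collect_adapter_stats(adapter, (struct adapter_stats *)data);
    data += sizeof(struct adapter_stats) / sizeof(u64);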
static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
"-------Loopback----------- ",
"octets_ok ",
u64 vlan_ins;
u64 gro_pkts;
u64 gro_merged;
-};
-
-struct adapter_stats {
- u64 db_drop;
- u64 db_full;
- u64 db_empty;
- u64 wc_success;
- u64 wc_fail;
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
u64 tx_tls_encrypted_packets;
u64 tx_tls_encrypted_bytes;
+ u64 tx_tls_ctx;
+ u64 tx_tls_ooo;
+ u64 tx_tls_skip_no_sync_data;
+ u64 tx_tls_drop_no_sync_data;
+ u64 tx_tls_drop_bypass_req;
#endif
};
+struct adapter_stats {
+ u64 db_drop;
+ u64 db_full;
+ u64 db_empty;
+ u64 wc_success;
+ u64 wc_fail;
+};
+
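
Net effect of the two moves above: the tx_tls_* counters leave the adapter-wide section of ethtool -S and reappear in the per-port queue section, now reporting that port's own traffic. On an interface with some ktls traffic the relevant slice would look something like (values illustrative):

    tx_tls_encrypted_packets: 1024
    tx_tls_encrypted_bytes: 1482752
    tx_tls_ctx: 2
    tx_tls_ooo: 0
    tx_tls_skip_no_sync_data: 0
    tx_tls_drop_no_sync_data: 0
    tx_tls_drop_bypass_req: 0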
static void collect_sge_port_stats(const struct adapter *adap,
const struct port_info *p,
struct queue_port_stats *s)
{
const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+ const struct ch_ktls_port_stats_debug *ktls_stats;
+#endif
struct sge_eohw_txq *eohw_tx;
unsigned int i;
s->vlan_ins += eohw_tx->vlan_ins;
}
}
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+ ktls_stats = &adap->ch_ktls_stats.ktls_port[p->port_id];
+ s->tx_tls_encrypted_packets =
+ atomic64_read(&ktls_stats->ktls_tx_encrypted_packets);
+ s->tx_tls_encrypted_bytes =
+ atomic64_read(&ktls_stats->ktls_tx_encrypted_bytes);
+ s->tx_tls_ctx = atomic64_read(&ktls_stats->ktls_tx_ctx);
+ s->tx_tls_ooo = atomic64_read(&ktls_stats->ktls_tx_ooo);
+ s->tx_tls_skip_no_sync_data =
+ atomic64_read(&ktls_stats->ktls_tx_skip_no_sync_data);
+ s->tx_tls_drop_no_sync_data =
+ atomic64_read(&ktls_stats->ktls_tx_drop_no_sync_data);
+ s->tx_tls_drop_bypass_req =
+ atomic64_read(&ktls_stats->ktls_tx_drop_bypass_req);
+#endif
}
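
Note the asymmetry in this function: the queue counters earlier in it are accumulated with += across the port's qsets, while the ktls counters are plain assignments, because they already live in a single per-port slot. Schematically (loop condensed, names from this patch):

    /* per-qset counters: summed over the port's tx queues */
    for (i = 0; i < p->nqsets; i++, rx++, tx++)
            s->vlan_ins += tx->vlan_ins;

    /* per-port ktls counters: one slot, read once, never summed */
    s->tx_tls_ooo = atomic64_read(&ktls_stats->ktls_tx_ooo);

Each atomic64_read() is a tearing-free read of one 64-bit counter, but the set of counters is not sampled atomically as a group.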
static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
#include "cxgb4.h"
#define MAX_ULD_QSETS 16
+#define MAX_ULD_NPORTS 4
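
MAX_ULD_NPORTS sizes the ktls_port[] array below, while the debugfs loop above iterates with MAX_NPORTS from cxgb4.h; both are 4 today, but nothing ties them together. A hypothetical compile-time guard (not in this patch, and it would need to live where both headers are visible) could pin that assumption:

    /* hypothetical: fail the build if the loop bound can overrun the array */
    static inline void chcr_ktls_port_bound_check(void)
    {
            BUILD_BUG_ON(MAX_NPORTS > MAX_ULD_NPORTS);
    }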
/* CPL message priority levels */
enum {
};
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
-struct ch_ktls_stats_debug {
+struct ch_ktls_port_stats_debug {
atomic64_t ktls_tx_connection_open;
atomic64_t ktls_tx_connection_fail;
atomic64_t ktls_tx_connection_close;
- atomic64_t ktls_tx_send_records;
- atomic64_t ktls_tx_end_pkts;
- atomic64_t ktls_tx_start_pkts;
- atomic64_t ktls_tx_middle_pkts;
- atomic64_t ktls_tx_retransmit_pkts;
- atomic64_t ktls_tx_complete_pkts;
- atomic64_t ktls_tx_trimmed_pkts;
atomic64_t ktls_tx_encrypted_packets;
atomic64_t ktls_tx_encrypted_bytes;
atomic64_t ktls_tx_ctx;
atomic64_t ktls_tx_ooo;
atomic64_t ktls_tx_skip_no_sync_data;
atomic64_t ktls_tx_drop_no_sync_data;
atomic64_t ktls_tx_drop_bypass_req;
};
+
+struct ch_ktls_stats_debug {
+ struct ch_ktls_port_stats_debug ktls_port[MAX_ULD_NPORTS];
+ atomic64_t ktls_tx_send_records;
+ atomic64_t ktls_tx_end_pkts;
+ atomic64_t ktls_tx_start_pkts;
+ atomic64_t ktls_tx_middle_pkts;
+ atomic64_t ktls_tx_retransmit_pkts;
+ atomic64_t ktls_tx_complete_pkts;
+ atomic64_t ktls_tx_trimmed_pkts;
+};
#endif
struct chcr_stats_debug {
struct chcr_ktls_ofld_ctx_tx *tx_ctx =
chcr_get_ktls_tx_context(tls_ctx);
struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
+ struct ch_ktls_port_stats_debug *port_stats;
if (!tx_info)
return;
tx_info->tid, tx_info->ip_family);
}
- atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_connection_close);
+ port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
+ atomic64_inc(&port_stats->ktls_tx_connection_close);
kvfree(tx_info);
tx_ctx->chcr_info = NULL;
/* release module refcount */
u32 start_offload_tcp_sn)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_ofld_ctx_tx *tx_ctx;
struct chcr_ktls_info *tx_info;
struct dst_entry *dst;
pi = netdev_priv(netdev);
adap = pi->adapter;
- atomic64_inc(&adap->ch_ktls_stats.ktls_tx_connection_open);
+ port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
+ atomic64_inc(&port_stats->ktls_tx_connection_open);
if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
pr_err("not expecting for RX direction\n");
goto out;
if (!cxgb4_check_l2t_valid(tx_info->l2te))
goto free_tid;
- atomic64_inc(&adap->ch_ktls_stats.ktls_tx_ctx);
+ atomic64_inc(&port_stats->ktls_tx_ctx);
tx_ctx->chcr_info = tx_info;
return 0;
else
kvfree(tx_info);
out:
- atomic64_inc(&adap->ch_ktls_stats.ktls_tx_connection_fail);
+ atomic64_inc(&port_stats->ktls_tx_connection_fail);
return -1;
}
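
Taken together with the dev_del() hunk above, the per-port connection counters follow a fixed lifecycle, which makes a few derived figures cheap to compute; a sketch of the invariants (my reading of the paths, not asserted anywhere in the code):

    /* dev_add() entry          -> ktls_tx_connection_open++
     * any dev_add() error path -> ktls_tx_connection_fail++  (out: label)
     * successful offload       -> ktls_tx_ctx++
     * dev_del()                -> ktls_tx_connection_close++
     *
     * so with no add in flight: open == fail + ctx, and
     * ctx - close ~= currently live offloaded connections.
     */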
u64 tcp_ack, u64 tcp_win)
{
bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0));
+ struct ch_ktls_port_stats_debug *port_stats;
u32 len, cpl = 0, ndesc, wr_len;
struct fw_ulptx_wr *wr;
int credits;
/* reset snd una if it's a re-transmit pkt */
if (tcp_seq != tx_info->prev_seq) {
/* reset snd_una */
+ port_stats =
+ &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
TCB_SND_UNA_RAW_W,
TCB_SND_UNA_RAW_V
(TCB_SND_UNA_RAW_M),
TCB_SND_UNA_RAW_V(0), 0);
- atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_ooo);
+ atomic64_inc(&port_stats->ktls_tx_ooo);
cpl++;
}
/* update ack */
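
One thing to watch in this hunk: port_stats is assigned only inside the retransmit branch, so any future use later in the function would read an uninitialized pointer. If more per-port counters ever land here, hoisting the assignment next to the declaration would be the safer shape, e.g. (hypothetical, not in the patch):

    struct ch_ktls_port_stats_debug *port_stats =
            &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];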
/* nic tls TX handler */
static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_ofld_ctx_tx *tx_ctx;
struct ch_ktls_stats_debug *stats;
struct tcphdr *th = tcp_hdr(skb);
adap = tx_info->adap;
stats = &adap->ch_ktls_stats;
+ port_stats = &stats->ktls_port[tx_info->port_id];
qidx = skb->queue_mapping;
q = &adap->sge.ethtxq[qidx + tx_info->first_qset];
*/
if (unlikely(!record)) {
spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
- atomic64_inc(&stats->ktls_tx_drop_no_sync_data);
+ atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data);
goto out;
}
if (unlikely(tls_record_is_start_marker(record))) {
spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
- atomic64_inc(&stats->ktls_tx_skip_no_sync_data);
+ atomic64_inc(&port_stats->ktls_tx_skip_no_sync_data);
goto out;
}
} while (data_len > 0);
tx_info->prev_seq = ntohl(th->seq) + skb->data_len;
-
- atomic64_inc(&stats->ktls_tx_encrypted_packets);
- atomic64_add(skb->data_len, &stats->ktls_tx_encrypted_bytes);
+ atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
+ atomic64_add(skb->data_len, &port_stats->ktls_tx_encrypted_bytes);
/* tcp finish is set, send a separate tcp msg including all the options
* as well.
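
Note that both per-port byte/packet counters are driven by skb->data_len, i.e. only the paged part of the skb; for ktls Tx the TLS record payload sits in page frags while the protocol headers stay in the linear area, so these count encrypted payload rather than wire bytes. Assumed layout:

    /* skb->len      = linear headers + skb->data_len
     * skb->data_len = TLS payload carried in page frags
     */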