From 5e7d77a9c5738457cdaa6bc230799c8f80733481 Mon Sep 17 00:00:00 2001
From: Tariq Toukan
Date: Thu, 24 May 2018 13:44:24 +0300
Subject: [PATCH] net/mlx5e: TX, Obsolete maintaining local copies of skb->len/data

Instead of maintaining a local copy of skb->len/data and updating it
upon every copy to the WQE inline part, just calculate it once when
needed, using the ihs.

This obsoletes the function mlx5e_tx_skb_pull_inline.

Signed-off-by: Tariq Toukan
Signed-off-by: Saeed Mahameed
---
 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 42 +++++++------------------
 1 file changed, 12 insertions(+), 30 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 725c062..f29deb4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -188,28 +188,16 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
 	return min_t(u16, hlen, skb_headlen(skb));
 }
 
-static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
-					    unsigned int *skb_len,
-					    unsigned int len)
-{
-	*skb_len -= len;
-	*skb_data += len;
-}
-
-static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
-				     unsigned char **skb_data,
-				     unsigned int *skb_len)
+static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
 {
 	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
 	int cpy1_sz = 2 * ETH_ALEN;
 	int cpy2_sz = ihs - cpy1_sz;
 
-	memcpy(vhdr, *skb_data, cpy1_sz);
-	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
+	memcpy(vhdr, skb->data, cpy1_sz);
 	vhdr->h_vlan_proto = skb->vlan_proto;
 	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
-	memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
-	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
+	memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
 }
 
 static inline void
@@ -357,8 +345,6 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	struct mlx5e_tx_wqe_info *wi;
 
 	struct mlx5e_sq_stats *stats = sq->stats;
-	unsigned char *skb_data = skb->data;
-	unsigned int skb_len = skb->len;
 	u16 ds_cnt, ds_cnt_inl = 0;
 	u16 headlen, ihs, frag_pi;
 	u8 num_wqebbs, opcode;
@@ -385,7 +371,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	stats->bytes += num_bytes;
 	stats->xmit_more += skb->xmit_more;
 
-	headlen = skb_len - ihs - skb->data_len;
+	headlen = skb->len - ihs - skb->data_len;
 	ds_cnt += !!headlen;
 	ds_cnt += skb_shinfo(skb)->nr_frags;
 
@@ -414,15 +400,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	eseg->mss = mss;
 
 	if (ihs) {
+		eseg->inline_hdr.sz = cpu_to_be16(ihs);
 		if (skb_vlan_tag_present(skb)) {
-			mlx5e_insert_vlan(eseg->inline_hdr.start, skb,
-					  ihs - VLAN_HLEN, &skb_data, &skb_len);
+			ihs -= VLAN_HLEN;
+			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs);
 			stats->added_vlan_packets++;
 		} else {
-			memcpy(eseg->inline_hdr.start, skb_data, ihs);
-			mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
+			memcpy(eseg->inline_hdr.start, skb->data, ihs);
 		}
-		eseg->inline_hdr.sz = cpu_to_be16(ihs);
 		dseg += ds_cnt_inl;
 	} else if (skb_vlan_tag_present(skb)) {
 		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
@@ -432,7 +417,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		stats->added_vlan_packets++;
 	}
 
-	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, dseg);
+	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
 	if (unlikely(num_dma < 0))
 		goto err_drop;
 
@@ -644,8 +629,6 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	struct mlx5e_tx_wqe_info *wi;
 
 	struct mlx5e_sq_stats *stats = sq->stats;
-	unsigned char *skb_data = skb->data;
-	unsigned int skb_len = skb->len;
 	u16 headlen, ihs, pi, frag_pi;
 	u16 ds_cnt, ds_cnt_inl = 0;
 	u8 num_wqebbs, opcode;
@@ -672,7 +655,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	stats->bytes += num_bytes;
 	stats->xmit_more += skb->xmit_more;
 
-	headlen = skb_len - ihs - skb->data_len;
+	headlen = skb->len - ihs - skb->data_len;
 	ds_cnt += !!headlen;
 	ds_cnt += skb_shinfo(skb)->nr_frags;
 
@@ -704,13 +687,12 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	eseg->mss = mss;
 
 	if (ihs) {
-		memcpy(eseg->inline_hdr.start, skb_data, ihs);
-		mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
+		memcpy(eseg->inline_hdr.start, skb->data, ihs);
 		eseg->inline_hdr.sz = cpu_to_be16(ihs);
 		dseg += ds_cnt_inl;
 	}
 
-	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, dseg);
+	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
 	if (unlikely(num_dma < 0))
 		goto err_drop;
 
-- 
2.7.4
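
For readers following the reasoning in the commit message, the sketch below is an illustration only, not driver code: struct fake_skb, build_old() and build_new() are hypothetical userspace stand-ins. It contrasts the two styles the patch touches: keeping local skb_data/skb_len copies that are "pulled" after every copy into the inline header, versus deriving the DMA offset and the linear byte count on demand from skb->data, skb->len, skb->data_len and ihs. Both variants arrive at the same offset and length.

/*
 * Minimal sketch, assuming a simplified stand-in for struct sk_buff.
 * "ihs" is the number of bytes copied into the WQE inline header; the
 * remaining linear bytes are what would be handed to DMA mapping.
 */
#include <stdio.h>
#include <string.h>

struct fake_skb {                /* hypothetical stand-in for struct sk_buff */
	unsigned char *data;     /* start of the linear part                 */
	unsigned int len;        /* total bytes, linear + frags              */
	unsigned int data_len;   /* bytes living in (non-linear) frags       */
};

/* Old style: local data/len copies, updated after each inline copy. */
static void build_old(struct fake_skb *skb, unsigned char *inline_hdr,
		      unsigned int ihs)
{
	unsigned char *skb_data = skb->data;
	unsigned int skb_len = skb->len;

	memcpy(inline_hdr, skb_data, ihs);
	skb_data += ihs;         /* what mlx5e_tx_skb_pull_inline() did */
	skb_len -= ihs;

	unsigned int headlen = skb_len - skb->data_len;
	printf("old: map %u linear bytes at offset %ld\n",
	       headlen, (long)(skb_data - skb->data));
}

/* New style: no local copies; offset and length follow from ihs. */
static void build_new(struct fake_skb *skb, unsigned char *inline_hdr,
		      unsigned int ihs)
{
	memcpy(inline_hdr, skb->data, ihs);

	unsigned int headlen = skb->len - ihs - skb->data_len;
	printf("new: map %u linear bytes at offset %u\n", headlen, ihs);
}

int main(void)
{
	unsigned char pkt[128] = { 0 };
	unsigned char inline_hdr[64];
	struct fake_skb skb = { .data = pkt, .len = 100, .data_len = 20 };

	/* Both variants report 62 linear bytes starting at offset 18. */
	build_old(&skb, inline_hdr, 18);
	build_new(&skb, inline_hdr, 18);
	return 0;
}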