/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"
#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)
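
/* Per-SQ DMA fifo bookkeeping: every mapping done while building a WQE is
 * pushed to sq->db.dma_fifo so it can be unmapped on completion, or unwound
 * if the WQE build fails.
 */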
static inline void mlx5e_tx_dma_unmap(struct device *pdev,
				      struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}
static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
				  dma_addr_t addr,
				  u32 size,
				  enum mlx5e_dma_map_type map_type)
{
	u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;

	sq->db.dma_fifo[i].addr = addr;
	sq->db.dma_fifo[i].size = size;
	sq->db.dma_fifo[i].type = map_type;
	sq->dma_fifo_pc++;
}
static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}
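
/* Pick a TX queue for the skb: the fallback hash selects a channel, then the
 * VLAN priority selects the traffic class within that channel.
 */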
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int channel_ix = fallback(dev, skb);
	u16 num_channels;
	int up = 0;

	if (!netdev_get_num_tc(dev))
		return channel_ix;

	if (skb_vlan_tag_present(skb))
		up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

	/* channel_ix can be larger than num_channels since
	 * dev->num_real_tx_queues = num_channels * num_tc
	 */
	num_channels = priv->channels.params.num_channels;
	if (channel_ix >= num_channels)
		channel_ix = reciprocal_scale(channel_ix, num_channels);

	return priv->channel_tc2txq[channel_ix][up];
}
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}
static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	struct flow_keys keys;

	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);
	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		return keys.control.thoff;
	else
		return mlx5e_skb_l2_header_offset(skb);
}
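
/* Compute how many header bytes must be inlined into the WQE, according to
 * the device's minimum inline mode (none, L2, up to L3, or up to L4 headers).
 */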
static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
						 struct sk_buff *skb)
{
	u16 hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		return hlen;
	case MLX5_INLINE_MODE_IP:
		/* When transport header is set to zero, it means no transport
		 * header. When transport header is set to 0xff's, it means
		 * transport header wasn't set.
		 */
		if (skb_transport_offset(skb))
			return mlx5e_skb_l3_header_offset(skb);
		/* fall through */
	case MLX5_INLINE_MODE_L2:
	default:
		return mlx5e_skb_l2_header_offset(skb);
	}
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
					    unsigned int *skb_len,
					    unsigned int len)
{
	*skb_len  -= len;
	*skb_data += len;
}

static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
				     unsigned char **skb_data,
				     unsigned int *skb_len)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, *skb_data, cpy1_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}
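
/* Set the checksum offload flags in the ethernet segment, covering the inner
 * headers as well when the skb is encapsulated.
 */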
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats.csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
		}
	} else
		sq->stats.csum_none++;
}
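
/* Fill the TSO fields of the ethernet segment and return the inline header
 * size (L2..L4 headers), accounting the total wire bytes for all segments.
 */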
static inline u16
mlx5e_txwqe_build_eseg_gso(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			   struct mlx5_wqe_eth_seg *eseg, unsigned int *num_bytes)
{
	u16 ihs;

	eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);

	if (skb->encapsulation) {
		ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
		sq->stats.tso_inner_packets++;
		sq->stats.tso_inner_bytes += skb->len - ihs;
	} else {
		ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
		sq->stats.tso_packets++;
		sq->stats.tso_bytes += skb->len - ihs;
	}

	*num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
	return ihs;
}
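
/* DMA-map the linear part and every page fragment of the skb into data
 * segments. Returns the number of mappings pushed, or -ENOMEM on a mapping
 * failure (the caller unwinds via mlx5e_dma_unmap_wqe_err()).
 */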
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			unsigned char *skb_data, u16 headlen,
			struct mlx5_wqe_data_seg *dseg)
{
	dma_addr_t dma_addr = 0;
	u8 num_dma = 0;
	int i;

	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			return -ENOMEM;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		num_dma++;
		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			return -ENOMEM;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		num_dma++;
		dseg++;
	}

	return num_dma;
}
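
/* Finalize the WQE: fill the control segment, account the bytes, stop the
 * queue when there is no room for another full WQE, ring the doorbell, and
 * pad the SQ edge with NOPs so a WQE never wraps around the work queue.
 */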
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     u8 opcode, u16 ds_cnt, u32 num_bytes, u8 num_dma,
		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi;

	wi->num_bytes = num_bytes;
	wi->num_dma = num_dma;
	wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	wi->skb = skb;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	netdev_tx_sent_queue(sq->txq, num_bytes);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	sq->pc += wi->num_wqebbs;
	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats.stopped++;
	}

	if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);

	/* fill sq edge with nops to avoid wqe wrap around */
	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
		sq->db.wqe_info[pi].skb = NULL;
		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		sq->stats.nop++;
	}
}
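
/* Build and post a single send WQE for an Ethernet skb: checksum/TSO
 * ethernet segment, inline headers (or HW VLAN insertion), data segments,
 * then completion bookkeeping and doorbell.
 */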
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
	struct mlx5_wq_cyc *wq = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;

	unsigned char *skb_data = skb->data;
	unsigned int skb_len = skb->len;
	u8 opcode = MLX5_OPCODE_SEND;
	unsigned int num_bytes;
	int num_dma;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;

	memset(wqe, 0, sizeof(*wqe));

	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	if (skb_is_gso(skb)) {
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
		sq->stats.packets += skb_shinfo(skb)->gso_segs;
	} else {
		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		sq->stats.packets++;
	}
	sq->stats.bytes += num_bytes;
	sq->stats.xmit_more += skb->xmit_more;

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs) {
		if (skb_vlan_tag_present(skb)) {
			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
			ihs += VLAN_HLEN;
		} else {
			memcpy(eseg->inline_hdr.start, skb_data, ihs);
			mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
		}
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
	} else if (skb_vlan_tag_present(skb)) {
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
	}

	headlen = skb_len - skb->data_len;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
					  (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
	if (unlikely(num_dma < 0))
		goto dma_unmap_wqe_err;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
			     num_bytes, num_dma, wi, cseg);

	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.dropped++;
	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];

	return mlx5e_sq_xmit(sq, skb);
}
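
/* TX completion handler: consume up to MLX5E_TX_CQ_POLL_BUDGET CQEs, unmap
 * the DMA of completed WQEs, free their skbs and wake the queue if it was
 * stopped. Returns true if the budget was exhausted.
 */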
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_txqsq *sq;
	u32 dma_fifo_cc;
	u32 nbytes = 0;
	u16 npkts = 0;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_txqsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
		struct mlx5_cqe64 *cqe;
		u16 wqe_counter;
		bool last_wqe;

		cqe = mlx5e_get_cqe(cq);
		if (!cqe)
			break;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			struct mlx5e_tx_wqe_info *wi;
			struct sk_buff *skb;
			u16 ci;
			int j;

			last_wqe = (sqcc == wqe_counter);

			ci = sqcc & sq->wq.sz_m1;
			wi = &sq->db.wqe_info[ci];
			skb = wi->skb;

			if (unlikely(!skb)) { /* nop */
				sqcc++;
				continue;
			}

			if (unlikely(skb_shinfo(skb)->tx_flags &
				     SKBTX_HW_TSTAMP)) {
				struct skb_shared_hwtstamps hwts = {};

				mlx5e_fill_hwstamp(sq->tstamp,
						   get_cqe_ts(cqe), &hwts);
				skb_tstamp_tx(skb, &hwts);
			}

			for (j = 0; j < wi->num_dma; j++) {
				struct mlx5e_sq_dma *dma =
					mlx5e_dma_get(sq, dma_fifo_cc++);

				mlx5e_tx_dma_unmap(sq->pdev, dma);
			}

			npkts++;
			nbytes += wi->num_bytes;
			sqcc += wi->num_wqebbs;
			napi_consume_skb(skb, napi_budget);
		} while (!last_wqe);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM)) {
		netif_tx_wake_queue(sq->txq);
		sq->stats.wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
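
/* Release all descriptors still posted on the SQ (used on queue teardown):
 * unmap their DMA and free the associated skbs.
 */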
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	struct sk_buff *skb;
	u16 ci;
	int i;

	while (sq->cc != sq->pc) {
		ci = sq->cc & sq->wq.sz_m1;
		wi = &sq->db.wqe_info[ci];
		skb = wi->skb;

		if (!skb) { /* nop */
			sq->cc++;
			continue;
		}

		for (i = 0; i < wi->num_dma; i++) {
			struct mlx5e_sq_dma *dma =
				mlx5e_dma_get(sq, sq->dma_fifo_cc++);

			mlx5e_tx_dma_unmap(sq->pdev, dma);
		}

		dev_kfree_skb_any(skb);
		sq->cc += wi->num_wqebbs;
	}
}
#ifdef CONFIG_MLX5_CORE_IPOIB

struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};

struct mlx5i_tx_wqe {
	struct mlx5_wqe_ctrl_seg     ctrl;
	struct mlx5_wqe_datagram_seg datagram;
	struct mlx5_wqe_eth_pad      pad;
	struct mlx5_wqe_eth_seg      eth;
};

static void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
			   struct mlx5_wqe_datagram_seg *dseg)
{
	memcpy(&dseg->av, av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}
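
/* IPoIB variant of the xmit path: same flow as mlx5e_sq_xmit(), except the
 * WQE carries a datagram segment with the destination QP and address vector,
 * and there is no VLAN handling.
 */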
netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			  struct mlx5_av *av, u32 dqpn, u32 dqkey)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5i_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	struct mlx5_wqe_ctrl_seg     *cseg = &wqe->ctrl;
	struct mlx5_wqe_datagram_seg *datagram = &wqe->datagram;
	struct mlx5_wqe_eth_seg      *eseg = &wqe->eth;

	unsigned char *skb_data = skb->data;
	unsigned int skb_len = skb->len;
	u8 opcode = MLX5_OPCODE_SEND;
	unsigned int num_bytes;
	int num_dma;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;

	memset(wqe, 0, sizeof(*wqe));

	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	if (skb_is_gso(skb)) {
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
	} else {
		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
	}

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs) {
		memcpy(eseg->inline_hdr.start, skb_data, ihs);
		mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
	}

	headlen = skb_len - skb->data_len;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
					  (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
	if (unlikely(num_dma < 0))
		goto dma_unmap_wqe_err;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
			     num_bytes, num_dma, wi, cseg);

	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.dropped++;
	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

#endif /* CONFIG_MLX5_CORE_IPOIB */