// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <linux/stringify.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
{
	unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;

	rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
	return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
}

void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi_zc, 0,
	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
}

static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi_zc[idx];
}

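/* Note: rx_ring->rx_bi_zc is a software shadow of the Rx descriptor ring;
 * entry idx holds the xdp_buff currently backing the hardware descriptor
 * at the same index.
 */
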
/**
 * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
 * certain ring/qid
 * @vsi: Current VSI
 * @pool: buffer pool
 * @qid: Rx ring to associate buffer pool with
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
				struct xsk_buff_pool *pool,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

		/* Kick start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
 * certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to disassociate buffer pool from
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xsk_buff_pool *pool;
	bool if_running;
	int err;

	pool = xsk_get_pool_from_qid(netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);

	if (if_running) {
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
 * a certain ring/qid
 * @vsi: Current VSI
 * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate buffer pool to/from
 *
 * This function enables or disables a buffer pool on a certain ring/qid.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
			u16 qid)
{
	return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
		i40e_xsk_pool_disable(vsi, qid);
}

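/* Note: i40e_xsk_pool_setup() is the entry point used when user space binds
 * or unbinds an AF_XDP socket to a queue; it is expected to be called from
 * the driver's XDP/ndo_bpf setup path rather than directly.
 */
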
/**
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
 **/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	/* NB! xdp_prog will always be !NULL, since this path is only
	 * enabled when an XDP program is set.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	}

	return result;
}

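/**
 * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Pulls buffers from the AF_XDP pool, writes their DMA addresses into the
 * Rx descriptors and, on any change, bumps the ring tail.
 *
 * Returns true if all @count buffers were allocated, false otherwise
 **/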
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct xdp_buff **bi, *xdp;
	dma_addr_t dma;
	bool ok = true;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = i40e_rx_bi(rx_ring, ntu);
	do {
		xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!xdp) {
			ok = false;
			goto no_buffers;
		}

		dma = xsk_buff_xdp_get_dma(xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->read.hdr_addr = 0;

		*bi++ = xdp;
		rx_desc++;
		ntu++;

		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = i40e_rx_bi(rx_ring, 0);
			ntu = 0;
		}
	} while (--count);

no_buffers:
	if (rx_ring->next_to_use != ntu) {
		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		i40e_release_rx_desc(rx_ring, ntu);
	}

	return ok;
}

/**
 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @xdp: xdp_buff
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb, or NULL on failure.
 **/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	xsk_buff_free(xdp);
	return skb;
}

static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
				      struct xdp_buff *xdp_buff,
				      union i40e_rx_desc *rx_desc,
				      unsigned int *rx_packets,
				      unsigned int *rx_bytes,
				      unsigned int size,
				      unsigned int xdp_res)
{
	struct sk_buff *skb;

	*rx_packets = 1;
	*rx_bytes = size;

	if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
		return;

	if (xdp_res == I40E_XDP_CONSUMED) {
		xsk_buff_free(xdp_buff);
		return;
	}

	if (xdp_res == I40E_XDP_PASS) {
		/* NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because SBP is
		 * *not* set in PRT_SBPVSI (default not set).
		 */
		skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			*rx_packets = 0;
			*rx_bytes = 0;
			return;
		}

		if (eth_skb_pad(skb)) {
			*rx_packets = 0;
			*rx_bytes = 0;
			return;
		}

		*rx_bytes = skb->len;
		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
		return;
	}

	/* Should never get here, as all valid cases have been handled already.
	 */
	WARN_ON_ONCE(1);
}

/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 **/
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	u16 next_to_clean = rx_ring->next_to_clean;
	u16 count_mask = rx_ring->count - 1;
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union i40e_rx_desc *rx_desc;
		unsigned int rx_packets;
		unsigned int rx_bytes;
		struct xdp_buff *bi;
		unsigned int size;
		u64 qword;

		rx_desc = I40E_RX_DESC(rx_ring, next_to_clean);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring,
						      rx_desc->raw.qword[0],
						      qword);
			bi = *i40e_rx_bi(rx_ring, next_to_clean);
			xsk_buff_free(bi);
			next_to_clean = (next_to_clean + 1) & count_mask;
			continue;
		}

		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		bi = *i40e_rx_bi(rx_ring, next_to_clean);
		bi->data_end = bi->data + size;
		xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);

		xdp_res = i40e_run_xdp_zc(rx_ring, bi);
		i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
					  &rx_bytes, size, xdp_res);
		total_rx_packets += rx_packets;
		total_rx_bytes += rx_bytes;
		xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
		next_to_clean = (next_to_clean + 1) & count_mask;
	}

	rx_ring->next_to_clean = next_to_clean;
	cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;

	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
		failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

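/* Note on i40e_clean_rx_irq_zc() above: the "(x + 1) & count_mask" index
 * arithmetic only works because the ring size is a power of two, so
 * count_mask = count - 1 is an all-ones mask.
 */
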
static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
			  unsigned int *total_bytes)
{
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP,
						  0, desc->len, 0);

	*total_bytes += desc->len;
}

static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
				unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;
	u32 i;

	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);

		tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC |
							  I40E_TX_DESC_CMD_EOP,
							  0, desc[i].len, 0);

		*total_bytes += desc[i].len;
	}

	xdp_ring->next_to_use = ntu;
}

static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts,
				 unsigned int *total_bytes)
{
	u32 batched, leftover, i;

	batched = nb_pkts & ~(PKTS_PER_BATCH - 1);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (i = batched; i < batched + leftover; i++)
		i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}

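/* Note on i40e_fill_tx_hw_ring() above: the batched/leftover split relies on
 * PKTS_PER_BATCH being a power of two, so the two masks partition nb_pkts
 * exactly between the unrolled batches and the per-packet tail loop.
 */
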
static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
{
	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
	struct i40e_tx_desc *tx_desc;

	tx_desc = I40E_TX_DESC(xdp_ring, ntu);
	tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
}

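/* Note on i40e_set_rs_bit() above: RS (Report Status) is set only on the
 * most recently filled descriptor, so completion is reported once per
 * transmitted batch instead of once per descriptor.
 */
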
/**
 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if the work is finished.
 **/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	struct xdp_desc *descs = xdp_ring->xsk_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;

	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
	if (!nb_pkts)
		return true;

	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
			     &total_bytes);

	/* Request an interrupt for the last frame and bump tail ptr. */
	i40e_set_rs_bit(xdp_ring);
	i40e_xdp_ring_update_tail(xdp_ring);

	i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);

	return nb_pkts < budget;
}

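/* Note on i40e_xmit_zc() above: if the peeked batch would run past the end of
 * the descriptor ring, the first i40e_fill_tx_hw_ring() call fills up to the
 * ring end and next_to_use wraps to 0, so the second call places the
 * remainder at the start of the ring.
 */
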
/**
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
 **/
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	tx_ring->xdp_tx_active--;
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

/**
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 *
 * Returns true if cleanup/transmission is done.
 **/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	u32 i, completed_frames, xsk_frames = 0;
	u32 head_idx = i40e_get_head(tx_ring);
	struct i40e_tx_buffer *tx_bi;
	unsigned int ntc;

	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	completed_frames = head_idx - tx_ring->next_to_clean;

	if (completed_frames == 0)
		goto out_xmit;

	if (likely(!tx_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf) {
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
			tx_bi->xdpf = NULL;
		} else {
			xsk_frames++;
		}

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

skip:
	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, completed_frames);

out_xmit:
	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);

	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}

/**
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
 *
 * Returns <0 for errors, 0 otherwise.
 **/
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -EAGAIN;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_queue_pairs)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}

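/**
 * i40e_xsk_clean_rx_ring - Clean the Rx ring on shutdown
 * @rx_ring: Rx ring
 *
 * Frees all xdp_buffs that are still outstanding on the ring.
 **/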
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 count_mask = rx_ring->count - 1;
	u16 ntc = rx_ring->next_to_clean;
	u16 ntu = rx_ring->next_to_use;

	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);

		xsk_buff_free(rx_bi);
	}
}

/**
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 **/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);
}

/**
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
 * buffer pool attached
 * @vsi: Current VSI
 *
 * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
 **/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xsk_get_pool_from_qid(netdev, i))
			return true;
	}

	return false;
}