1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2017-2019 NXP */
5 #include <linux/bpf_trace.h>
8 #include <linux/vmalloc.h>
9 #include <linux/ptp_classify.h>
10 #include <net/pkt_sched.h>
12 static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
14 int num_tx_rings = priv->num_tx_rings;
17 for (i = 0; i < priv->num_rx_rings; i++)
18 if (priv->rx_ring[i]->xdp.prog)
19 return num_tx_rings - num_possible_cpus();
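/* When any RX ring has an XDP program attached, the per-CPU XDP TX rings
 * (the tail of the TX ring array) are taken out of the stack's queue count;
 * without XDP, every hardware TX ring is exposed to the stack.
 */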
24 static struct enetc_bdr *enetc_rx_ring_from_xdp_tx_ring(struct enetc_ndev_priv *priv,
25 struct enetc_bdr *tx_ring)
27 int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring;
29 return priv->rx_ring[index];
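/* priv->xdp_tx_ring points at the first per-CPU XDP TX ring, so the pointer
 * subtraction above yields the index of this TX ring within that group,
 * which is also the index of the RX ring it is paired with. For example,
 * with 8 TX rings and 2 CPUs, xdp_tx_ring is &tx_ring[6] and TX ring 7
 * pairs with RX ring 1 (illustrative values).
 */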
32 static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd)
34 if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect)
40 static struct xdp_frame *
41 enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd)
43 if (tx_swbd->is_xdp_redirect)
44 return tx_swbd->xdp_frame;
49 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
50 struct enetc_tx_swbd *tx_swbd)
52 /* For XDP_TX, pages come from RX, whereas for the other contexts where
53 * we have is_dma_page set, those come from skb_frag_dma_map. The unmap
54 * length must match the original mapping length, so we differentiate the two.
56 if (tx_swbd->is_dma_page)
57 dma_unmap_page(tx_ring->dev, tx_swbd->dma,
58 tx_swbd->is_xdp_tx ? PAGE_SIZE : tx_swbd->len,
61 dma_unmap_single(tx_ring->dev, tx_swbd->dma,
62 tx_swbd->len, tx_swbd->dir);
66 static void enetc_free_tx_frame(struct enetc_bdr *tx_ring,
67 struct enetc_tx_swbd *tx_swbd)
69 struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
70 struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
73 enetc_unmap_tx_buff(tx_ring, tx_swbd);
76 xdp_return_frame(tx_swbd->xdp_frame);
77 tx_swbd->xdp_frame = NULL;
79 dev_kfree_skb_any(skb);
84 /* Let H/W know BD ring has been updated */
85 static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring)
88 enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use);
91 static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
92 u8 *msgtype, u8 *twostep,
93 u16 *correction_offset, u16 *body_offset)
95 unsigned int ptp_class;
96 struct ptp_header *hdr;
100 ptp_class = ptp_classify_raw(skb);
101 if (ptp_class == PTP_CLASS_NONE)
104 hdr = ptp_parse_header(skb, ptp_class);
108 type = ptp_class & PTP_CLASS_PMASK;
109 if (type == PTP_CLASS_IPV4 || type == PTP_CLASS_IPV6)
114 *msgtype = ptp_get_msgtype(hdr, ptp_class);
115 *twostep = hdr->flag_field[0] & 0x2;
117 base = skb_mac_header(skb);
118 *correction_offset = (u8 *)&hdr->correction - base;
119 *body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
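/* On success, *correction_offset is the offset of the PTP correctionField
 * and *body_offset the offset of the message body (the originTimestamp for
 * Sync packets), both relative to the MAC header, so callers can patch the
 * frame in place and program the one-step timestamping offsets.
 */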
124 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
126 bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
127 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
128 struct enetc_hw *hw = &priv->si->hw;
129 struct enetc_tx_swbd *tx_swbd;
130 int len = skb_headlen(skb);
131 union enetc_tx_bd temp_bd;
132 u8 msgtype, twostep, udp;
133 union enetc_tx_bd *txbd;
134 u16 offset1, offset2;
141 i = tx_ring->next_to_use;
142 txbd = ENETC_TXBD(*tx_ring, i);
145 dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
146 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
149 temp_bd.addr = cpu_to_le64(dma);
150 temp_bd.buf_len = cpu_to_le16(len);
153 tx_swbd = &tx_ring->tx_swbd[i];
156 tx_swbd->is_dma_page = 0;
157 tx_swbd->dir = DMA_TO_DEVICE;
160 do_vlan = skb_vlan_tag_present(skb);
161 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
162 if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1,
164 msgtype != PTP_MSGTYPE_SYNC || twostep)
165 WARN_ONCE(1, "Bad packet for one-step timestamping\n");
167 do_onestep_tstamp = true;
168 } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) {
169 do_twostep_tstamp = true;
172 tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
173 tx_swbd->check_wb = tx_swbd->do_twostep_tstamp;
175 if (do_vlan || do_onestep_tstamp || do_twostep_tstamp)
176 flags |= ENETC_TXBD_FLAGS_EX;
178 if (tx_ring->tsd_enable)
179 flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
181 /* first BD needs frm_len and offload flags set */
182 temp_bd.frm_len = cpu_to_le16(skb->len);
183 temp_bd.flags = flags;
185 if (flags & ENETC_TXBD_FLAGS_TSE)
186 temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns,
189 if (flags & ENETC_TXBD_FLAGS_EX) {
192 enetc_clear_tx_bd(&temp_bd);
194 /* add extension BD for VLAN and/or timestamping */
199 if (unlikely(i == tx_ring->bd_count)) {
201 tx_swbd = tx_ring->tx_swbd;
202 txbd = ENETC_TXBD(*tx_ring, 0);
207 temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
208 temp_bd.ext.tpid = 0; /* 0 = C-TAG */
209 e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
212 if (do_onestep_tstamp) {
217 lo = enetc_rd_hot(hw, ENETC_SICTR0);
218 hi = enetc_rd_hot(hw, ENETC_SICTR1);
219 sec = (u64)hi << 32 | lo;
220 nsec = do_div(sec, 1000000000);
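/* do_div() divides in place: sec now holds the seconds part of the
 * free-running SI counter value, and the return value is the
 * nanoseconds remainder.
 */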
222 /* Configure extension BD */
223 temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff);
224 e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP;
226 /* Update originTimestamp field of Sync packet
227 * - 48 bits seconds field
228 * - 32 bits nanoseconds field
230 data = skb_mac_header(skb);
231 *(__be16 *)(data + offset2) =
232 htons((sec >> 32) & 0xffff);
233 *(__be32 *)(data + offset2 + 2) =
234 htonl(sec & 0xffffffff);
235 *(__be32 *)(data + offset2 + 6) = htonl(nsec);
237 /* Configure single-step register */
238 val = ENETC_PM0_SINGLE_STEP_EN;
239 val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1);
241 val |= ENETC_PM0_SINGLE_STEP_CH;
243 enetc_port_wr(hw, ENETC_PM0_SINGLE_STEP, val);
244 enetc_port_wr(hw, ENETC_PM1_SINGLE_STEP, val);
245 } else if (do_twostep_tstamp) {
246 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
247 e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
250 temp_bd.ext.e_flags = e_flags;
254 frag = &skb_shinfo(skb)->frags[0];
255 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
256 len = skb_frag_size(frag);
257 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
259 if (dma_mapping_error(tx_ring->dev, dma))
263 enetc_clear_tx_bd(&temp_bd);
269 if (unlikely(i == tx_ring->bd_count)) {
271 tx_swbd = tx_ring->tx_swbd;
272 txbd = ENETC_TXBD(*tx_ring, 0);
276 temp_bd.addr = cpu_to_le64(dma);
277 temp_bd.buf_len = cpu_to_le16(len);
281 tx_swbd->is_dma_page = 1;
282 tx_swbd->dir = DMA_TO_DEVICE;
286 /* last BD needs 'F' bit set */
287 flags |= ENETC_TXBD_FLAGS_F;
288 temp_bd.flags = flags;
291 tx_ring->tx_swbd[i].is_eof = true;
292 tx_ring->tx_swbd[i].skb = skb;
294 enetc_bdr_idx_inc(tx_ring, &i);
295 tx_ring->next_to_use = i;
297 skb_tx_timestamp(skb);
299 enetc_update_tx_ring_tail(tx_ring);
304 dev_err(tx_ring->dev, "DMA map error");
307 tx_swbd = &tx_ring->tx_swbd[i];
308 enetc_free_tx_frame(tx_ring, tx_swbd);
310 i = tx_ring->bd_count;
317 static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
318 struct net_device *ndev)
320 struct enetc_ndev_priv *priv = netdev_priv(ndev);
321 struct enetc_bdr *tx_ring;
324 /* Queue one-step Sync packet if already locked */
325 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
326 if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS,
328 skb_queue_tail(&priv->tx_skbs, skb);
333 tx_ring = priv->tx_ring[skb->queue_mapping];
335 if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
336 if (unlikely(skb_linearize(skb)))
337 goto drop_packet_err;
339 count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
340 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
341 netif_stop_subqueue(ndev, tx_ring->index);
342 return NETDEV_TX_BUSY;
346 count = enetc_map_tx_buffs(tx_ring, skb);
349 if (unlikely(!count))
350 goto drop_packet_err;
352 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
353 netif_stop_subqueue(ndev, tx_ring->index);
358 dev_kfree_skb_any(skb);
362 netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
364 struct enetc_ndev_priv *priv = netdev_priv(ndev);
365 u8 udp, msgtype, twostep;
366 u16 offset1, offset2;
368 /* Mark the TX timestamp type on skb->cb[0] if required */
369 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
370 (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) {
371 skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
376 /* Fall back to two-step timestamp if not one-step Sync packet */
377 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
378 if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep,
379 &offset1, &offset2) ||
380 msgtype != PTP_MSGTYPE_SYNC || twostep != 0)
381 skb->cb[0] = ENETC_F_TX_TSTAMP;
384 return enetc_start_xmit(skb, ndev);
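/* skb->cb[0] carries the requested TX timestamp type so that
 * enetc_start_xmit() and the TX completion path can tell one-step and
 * two-step requests apart; anything that is not a valid one-step Sync
 * packet is demoted to two-step timestamping above.
 */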
387 static irqreturn_t enetc_msix(int irq, void *data)
389 struct enetc_int_vector *v = data;
394 /* disable interrupts */
395 enetc_wr_reg_hot(v->rbier, 0);
396 enetc_wr_reg_hot(v->ricr1, v->rx_ictt);
398 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
399 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);
403 napi_schedule(&v->napi);
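/* The hard IRQ only masks the ring interrupt sources and defers all BD
 * processing to NAPI; interrupts are re-enabled at the end of enetc_poll()
 * once the rings have been cleaned within the work budget.
 */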
408 static void enetc_rx_dim_work(struct work_struct *w)
410 struct dim *dim = container_of(w, struct dim, work);
411 struct dim_cq_moder moder =
412 net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
413 struct enetc_int_vector *v =
414 container_of(dim, struct enetc_int_vector, rx_dim);
416 v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
417 dim->state = DIM_START_MEASURE;
420 static void enetc_rx_net_dim(struct enetc_int_vector *v)
422 struct dim_sample dim_sample = {};
426 if (!v->rx_napi_work)
429 dim_update_sample(v->comp_cnt,
430 v->rx_ring.stats.packets,
431 v->rx_ring.stats.bytes,
433 net_dim(&v->rx_dim, dim_sample);
436 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
438 int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
440 return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
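/* tx_ring->tcir reflects how far the hardware has advanced through the TX
 * ring; its difference from the driver's next_to_clean index, modulo the
 * ring size, is the number of completed BDs. For example, with a 256-BD
 * ring, ci = 250 and pi = 4 give 256 - 250 + 4 = 10 BDs ready for cleanup.
 */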
443 static bool enetc_page_reusable(struct page *page)
445 return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
448 static void enetc_reuse_page(struct enetc_bdr *rx_ring,
449 struct enetc_rx_swbd *old)
451 struct enetc_rx_swbd *new;
453 new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
455 /* next buf that may reuse a page */
456 enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
458 /* copy page reference */
462 static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
465 u32 lo, hi, tstamp_lo;
467 lo = enetc_rd_hot(hw, ENETC_SICTR0);
468 hi = enetc_rd_hot(hw, ENETC_SICTR1);
469 tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
472 *tstamp = (u64)hi << 32 | tstamp_lo;
475 static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
477 struct skb_shared_hwtstamps shhwtstamps;
479 if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
480 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
481 shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
482 skb_txtime_consumed(skb);
483 skb_tstamp_tx(skb, &shhwtstamps);
487 static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring,
488 struct enetc_tx_swbd *tx_swbd)
490 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
491 struct enetc_rx_swbd rx_swbd = {
493 .page = tx_swbd->page,
494 .page_offset = tx_swbd->page_offset,
498 struct enetc_bdr *rx_ring;
500 rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring);
502 if (likely(enetc_swbd_unused(rx_ring))) {
503 enetc_reuse_page(rx_ring, &rx_swbd);
505 /* sync for use by the device */
506 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma,
508 ENETC_RXB_DMA_SIZE_XDP,
511 rx_ring->stats.recycles++;
513 /* RX ring is already full, we need to unmap and free the
514 * page, since there's nothing useful we can do with it.
516 rx_ring->stats.recycle_failures++;
518 dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
520 __free_page(rx_swbd.page);
523 rx_ring->xdp.xdp_tx_in_flight--;
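/* Pages used for XDP_TX keep their DMA_BIDIRECTIONAL mapping, so on TX
 * completion they can be handed straight back to the originating RX ring
 * without a dma_unmap/dma_map cycle; only when the RX ring has no free
 * slot is the page unmapped and freed (counted as a recycle failure).
 */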
526 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
528 struct net_device *ndev = tx_ring->ndev;
529 struct enetc_ndev_priv *priv = netdev_priv(ndev);
530 int tx_frm_cnt = 0, tx_byte_cnt = 0;
531 struct enetc_tx_swbd *tx_swbd;
533 bool do_twostep_tstamp;
536 i = tx_ring->next_to_clean;
537 tx_swbd = &tx_ring->tx_swbd[i];
539 bds_to_clean = enetc_bd_ready_count(tx_ring, i);
541 do_twostep_tstamp = false;
543 while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
544 struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
545 struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
546 bool is_eof = tx_swbd->is_eof;
548 if (unlikely(tx_swbd->check_wb)) {
549 struct enetc_ndev_priv *priv = netdev_priv(ndev);
550 union enetc_tx_bd *txbd;
552 txbd = ENETC_TXBD(*tx_ring, i);
554 if (txbd->flags & ENETC_TXBD_FLAGS_W &&
555 tx_swbd->do_twostep_tstamp) {
556 enetc_get_tx_tstamp(&priv->si->hw, txbd,
558 do_twostep_tstamp = true;
562 if (tx_swbd->is_xdp_tx)
563 enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd);
564 else if (likely(tx_swbd->dma))
565 enetc_unmap_tx_buff(tx_ring, tx_swbd);
568 xdp_return_frame(xdp_frame);
570 if (unlikely(tx_swbd->skb->cb[0] &
571 ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
572 /* Start work to release the lock for the next one-step
573 * timestamping packet, and send one queued skb from
574 * tx_skbs, if there is one.
576 schedule_work(&priv->tx_onestep_tstamp);
577 } else if (unlikely(do_twostep_tstamp)) {
578 enetc_tstamp_tx(skb, tstamp);
579 do_twostep_tstamp = false;
581 napi_consume_skb(skb, napi_budget);
584 tx_byte_cnt += tx_swbd->len;
585 /* Scrub the swbd here so we don't have to do that
586 * when we reuse it during xmit
588 memset(tx_swbd, 0, sizeof(*tx_swbd));
593 if (unlikely(i == tx_ring->bd_count)) {
595 tx_swbd = tx_ring->tx_swbd;
598 /* BD iteration loop end */
601 /* re-arm interrupt source */
602 enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
603 BIT(16 + tx_ring->index));
606 if (unlikely(!bds_to_clean))
607 bds_to_clean = enetc_bd_ready_count(tx_ring, i);
610 tx_ring->next_to_clean = i;
611 tx_ring->stats.packets += tx_frm_cnt;
612 tx_ring->stats.bytes += tx_byte_cnt;
614 if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
615 __netif_subqueue_stopped(ndev, tx_ring->index) &&
616 (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
617 netif_wake_subqueue(ndev, tx_ring->index);
620 return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
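/* Returning true means the TX work budget was not exhausted, which lets
 * enetc_poll() treat this ring as fully cleaned when deciding whether to
 * complete NAPI and re-enable interrupts.
 */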
623 static bool enetc_new_page(struct enetc_bdr *rx_ring,
624 struct enetc_rx_swbd *rx_swbd)
626 bool xdp = !!(rx_ring->xdp.prog);
630 page = dev_alloc_page();
634 /* For XDP_TX, we forgo dma_unmap -> dma_map */
635 rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
637 addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir);
638 if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
645 rx_swbd->page = page;
646 rx_swbd->page_offset = rx_ring->buffer_offset;
651 static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
653 struct enetc_rx_swbd *rx_swbd;
654 union enetc_rx_bd *rxbd;
657 i = rx_ring->next_to_use;
658 rx_swbd = &rx_ring->rx_swbd[i];
659 rxbd = enetc_rxbd(rx_ring, i);
661 for (j = 0; j < buff_cnt; j++) {
663 if (unlikely(!rx_swbd->page)) {
664 if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
665 rx_ring->stats.rx_alloc_errs++;
671 rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
672 rx_swbd->page_offset);
673 /* clear 'R' as well */
676 enetc_rxbd_next(rx_ring, &rxbd, &i);
677 rx_swbd = &rx_ring->rx_swbd[i];
681 rx_ring->next_to_alloc = i; /* keep track for page reuse */
682 rx_ring->next_to_use = i;
684 /* update ENETC's consumer index */
685 enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
691 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
692 static void enetc_get_rx_tstamp(struct net_device *ndev,
693 union enetc_rx_bd *rxbd,
696 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
697 struct enetc_ndev_priv *priv = netdev_priv(ndev);
698 struct enetc_hw *hw = &priv->si->hw;
699 u32 lo, hi, tstamp_lo;
702 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
703 lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
704 hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
705 rxbd = enetc_rxbd_ext(rxbd);
706 tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
710 tstamp = (u64)hi << 32 | tstamp_lo;
711 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
712 shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
717 static void enetc_get_offloads(struct enetc_bdr *rx_ring,
718 union enetc_rx_bd *rxbd, struct sk_buff *skb)
720 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
723 if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
724 u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
726 skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
727 skb->ip_summed = CHECKSUM_COMPLETE;
730 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
733 switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
735 tpid = htons(ETH_P_8021Q);
738 tpid = htons(ETH_P_8021AD);
741 tpid = htons(enetc_port_rd(&priv->si->hw,
745 tpid = htons(enetc_port_rd(&priv->si->hw,
752 __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
755 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
756 if (priv->active_offloads & ENETC_F_RX_TSTAMP)
757 enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
761 /* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS,
762 * so it needs to work with both DMA_FROM_DEVICE and DMA_BIDIRECTIONAL mapped buffers.
765 static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
768 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
770 dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
771 rx_swbd->page_offset,
776 /* Reuse the current page without performing half-page buffer flipping */
777 static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
778 struct enetc_rx_swbd *rx_swbd)
780 size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset;
782 enetc_reuse_page(rx_ring, rx_swbd);
784 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
785 rx_swbd->page_offset,
786 buffer_size, rx_swbd->dir);
788 rx_swbd->page = NULL;
791 /* Reuse the current page by performing half-page buffer flipping */
792 static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring,
793 struct enetc_rx_swbd *rx_swbd)
795 if (likely(enetc_page_reusable(rx_swbd->page))) {
796 rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
797 page_ref_inc(rx_swbd->page);
799 enetc_put_rx_buff(rx_ring, rx_swbd);
801 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
803 rx_swbd->page = NULL;
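/* Half-page flipping: ENETC_RXB_TRUESIZE is half a page, so XOR-ing
 * page_offset with it toggles between the two buffer halves of the same
 * page. The half just handed to the stack (or XDP) stays referenced, the
 * other half is re-armed for RX; a non-reusable page is unmapped here and
 * replaced on the next refill.
 */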
807 static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
810 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
814 ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
815 skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE);
816 if (unlikely(!skb)) {
817 rx_ring->stats.rx_alloc_errs++;
821 skb_reserve(skb, rx_ring->buffer_offset);
822 __skb_put(skb, size);
824 enetc_flip_rx_buff(rx_ring, rx_swbd);
829 static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
830 u16 size, struct sk_buff *skb)
832 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
834 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
835 rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
837 enetc_flip_rx_buff(rx_ring, rx_swbd);
840 static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring,
842 union enetc_rx_bd **rxbd, int *i)
844 if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))))
847 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
848 enetc_rxbd_next(rx_ring, rxbd, i);
850 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
852 bd_status = le32_to_cpu((*rxbd)->r.lstatus);
854 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
855 enetc_rxbd_next(rx_ring, rxbd, i);
858 rx_ring->ndev->stats.rx_dropped++;
859 rx_ring->ndev->stats.rx_errors++;
864 static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
865 u32 bd_status, union enetc_rx_bd **rxbd,
866 int *i, int *cleaned_cnt, int buffer_size)
871 size = le16_to_cpu((*rxbd)->r.buf_len);
872 skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size);
876 enetc_get_offloads(rx_ring, *rxbd, skb);
880 enetc_rxbd_next(rx_ring, rxbd, i);
882 /* not last BD in frame? */
883 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
884 bd_status = le32_to_cpu((*rxbd)->r.lstatus);
887 if (bd_status & ENETC_RXBD_LSTATUS_F) {
889 size = le16_to_cpu((*rxbd)->r.buf_len);
892 enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb);
896 enetc_rxbd_next(rx_ring, rxbd, i);
899 skb_record_rx_queue(skb, rx_ring->index);
900 skb->protocol = eth_type_trans(skb, rx_ring->ndev);
905 #define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
907 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
908 struct napi_struct *napi, int work_limit)
910 int rx_frm_cnt = 0, rx_byte_cnt = 0;
913 cleaned_cnt = enetc_bd_unused(rx_ring);
914 /* next descriptor to process */
915 i = rx_ring->next_to_clean;
917 while (likely(rx_frm_cnt < work_limit)) {
918 union enetc_rx_bd *rxbd;
922 if (cleaned_cnt >= ENETC_RXBD_BUNDLE)
923 cleaned_cnt -= enetc_refill_rx_ring(rx_ring,
926 rxbd = enetc_rxbd(rx_ring, i);
927 bd_status = le32_to_cpu(rxbd->r.lstatus);
931 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
932 dma_rmb(); /* for reading other rxbd fields */
934 if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
938 skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i,
939 &cleaned_cnt, ENETC_RXB_DMA_SIZE);
943 rx_byte_cnt += skb->len;
946 napi_gro_receive(napi, skb);
949 rx_ring->next_to_clean = i;
951 rx_ring->stats.packets += rx_frm_cnt;
952 rx_ring->stats.bytes += rx_byte_cnt;
957 static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i,
958 struct enetc_tx_swbd *tx_swbd,
961 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
965 enetc_clear_tx_bd(txbd);
966 txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset);
967 txbd->buf_len = cpu_to_le16(tx_swbd->len);
968 txbd->frm_len = cpu_to_le16(frm_len);
970 memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd));
973 /* Put one XDP frame in the TX ring, mapped as an array of TX software BDs. */
976 static bool enetc_xdp_tx(struct enetc_bdr *tx_ring,
977 struct enetc_tx_swbd *xdp_tx_arr, int num_tx_swbd)
979 struct enetc_tx_swbd *tmp_tx_swbd = xdp_tx_arr;
980 int i, k, frm_len = tmp_tx_swbd->len;
982 if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd)))
985 while (unlikely(!tmp_tx_swbd->is_eof)) {
987 frm_len += tmp_tx_swbd->len;
990 i = tx_ring->next_to_use;
992 for (k = 0; k < num_tx_swbd; k++) {
993 struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[k];
995 enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len);
997 /* last BD needs 'F' bit set */
998 if (xdp_tx_swbd->is_eof) {
999 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
1001 txbd->flags = ENETC_TXBD_FLAGS_F;
1004 enetc_bdr_idx_inc(tx_ring, &i);
1007 tx_ring->next_to_use = i;
1012 static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
1013 struct enetc_tx_swbd *xdp_tx_arr,
1014 struct xdp_frame *xdp_frame)
1016 struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0];
1017 struct skb_shared_info *shinfo;
1018 void *data = xdp_frame->data;
1019 int len = xdp_frame->len;
1025 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
1026 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
1027 netdev_err(tx_ring->ndev, "DMA map error\n");
1031 xdp_tx_swbd->dma = dma;
1032 xdp_tx_swbd->dir = DMA_TO_DEVICE;
1033 xdp_tx_swbd->len = len;
1034 xdp_tx_swbd->is_xdp_redirect = true;
1035 xdp_tx_swbd->is_eof = false;
1036 xdp_tx_swbd->xdp_frame = NULL;
1039 xdp_tx_swbd = &xdp_tx_arr[n];
1041 shinfo = xdp_get_shared_info_from_frame(xdp_frame);
1043 for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags;
1045 data = skb_frag_address(frag);
1046 len = skb_frag_size(frag);
1048 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
1049 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
1050 /* Undo the DMA mapping for all fragments */
1052 enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]);
1054 netdev_err(tx_ring->ndev, "DMA map error\n");
1058 xdp_tx_swbd->dma = dma;
1059 xdp_tx_swbd->dir = DMA_TO_DEVICE;
1060 xdp_tx_swbd->len = len;
1061 xdp_tx_swbd->is_xdp_redirect = true;
1062 xdp_tx_swbd->is_eof = false;
1063 xdp_tx_swbd->xdp_frame = NULL;
1066 xdp_tx_swbd = &xdp_tx_arr[n];
1069 xdp_tx_arr[n - 1].is_eof = true;
1070 xdp_tx_arr[n - 1].xdp_frame = xdp_frame;
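/* The return value is the number of software BDs consumed: one for the
 * frame head plus one per fragment. Only the last BD keeps a pointer to
 * the xdp_frame, so xdp_return_frame() runs exactly once per frame when
 * the TX completion path reaches the end-of-frame BD.
 */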
1075 int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
1076 struct xdp_frame **frames, u32 flags)
1078 struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0};
1079 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1080 struct enetc_bdr *tx_ring;
1081 int xdp_tx_bd_cnt, i, k;
1082 int xdp_tx_frm_cnt = 0;
1086 tx_ring = priv->xdp_tx_ring[smp_processor_id()];
1088 prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use));
1090 for (k = 0; k < num_frames; k++) {
1091 xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring,
1094 if (unlikely(xdp_tx_bd_cnt < 0))
1097 if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr,
1099 for (i = 0; i < xdp_tx_bd_cnt; i++)
1100 enetc_unmap_tx_buff(tx_ring,
1101 &xdp_redirect_arr[i]);
1102 tx_ring->stats.xdp_tx_drops++;
1109 if (unlikely((flags & XDP_XMIT_FLUSH) || k != xdp_tx_frm_cnt))
1110 enetc_update_tx_ring_tail(tx_ring);
1112 tx_ring->stats.xdp_tx += xdp_tx_frm_cnt;
1114 enetc_unlock_mdio();
1116 return xdp_tx_frm_cnt;
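/* ndo_xdp_xmit sends on the XDP TX ring dedicated to the current CPU;
 * frames that do not fit in the ring are unmapped and counted as
 * xdp_tx_drops, and the TX tail (TPIR) is written at most once per batch
 * (on XDP_XMIT_FLUSH or when the batch is cut short) rather than per frame.
 */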
1119 static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
1120 struct xdp_buff *xdp_buff, u16 size)
1122 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
1123 void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset;
1124 struct skb_shared_info *shinfo;
1126 /* To be used for XDP_TX */
1127 rx_swbd->len = size;
1129 xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset,
1130 rx_ring->buffer_offset, size, false);
1132 shinfo = xdp_get_shared_info_from_buff(xdp_buff);
1133 shinfo->nr_frags = 0;
1136 static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
1137 u16 size, struct xdp_buff *xdp_buff)
1139 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
1140 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
1141 skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags];
1143 /* To be used for XDP_TX */
1144 rx_swbd->len = size;
1146 skb_frag_off_set(frag, rx_swbd->page_offset);
1147 skb_frag_size_set(frag, size);
1148 __skb_frag_set_page(frag, rx_swbd->page);
1153 static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status,
1154 union enetc_rx_bd **rxbd, int *i,
1155 int *cleaned_cnt, struct xdp_buff *xdp_buff)
1157 u16 size = le16_to_cpu((*rxbd)->r.buf_len);
1159 xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq);
1161 enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size);
1163 enetc_rxbd_next(rx_ring, rxbd, i);
1165 /* not last BD in frame? */
1166 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
1167 bd_status = le32_to_cpu((*rxbd)->r.lstatus);
1168 size = ENETC_RXB_DMA_SIZE_XDP;
1170 if (bd_status & ENETC_RXBD_LSTATUS_F) {
1172 size = le16_to_cpu((*rxbd)->r.buf_len);
1175 enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff);
1177 enetc_rxbd_next(rx_ring, rxbd, i);
1181 /* Convert RX buffer descriptors to TX buffer descriptors. These will be
1182 * recycled back into the RX ring in enetc_clean_tx_ring.
1184 static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr,
1185 struct enetc_bdr *rx_ring,
1186 int rx_ring_first, int rx_ring_last)
1190 for (; rx_ring_first != rx_ring_last;
1191 n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) {
1192 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
1193 struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n];
1195 /* No need to dma_map, we already have DMA_BIDIRECTIONAL */
1196 tx_swbd->dma = rx_swbd->dma;
1197 tx_swbd->dir = rx_swbd->dir;
1198 tx_swbd->page = rx_swbd->page;
1199 tx_swbd->page_offset = rx_swbd->page_offset;
1200 tx_swbd->len = rx_swbd->len;
1201 tx_swbd->is_dma_page = true;
1202 tx_swbd->is_xdp_tx = true;
1203 tx_swbd->is_eof = false;
1206 /* We rely on the caller providing rx_ring_last > rx_ring_first */
1207 xdp_tx_arr[n - 1].is_eof = true;
1212 static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
1215 while (rx_ring_first != rx_ring_last) {
1216 enetc_put_rx_buff(rx_ring,
1217 &rx_ring->rx_swbd[rx_ring_first]);
1218 enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
1220 rx_ring->stats.xdp_drops++;
1223 static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first,
1226 while (rx_ring_first != rx_ring_last) {
1227 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
1229 if (rx_swbd->page) {
1230 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
1232 __free_page(rx_swbd->page);
1233 rx_swbd->page = NULL;
1235 enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
1237 rx_ring->stats.xdp_redirect_failures++;
1240 static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
1241 struct napi_struct *napi, int work_limit,
1242 struct bpf_prog *prog)
1244 int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0;
1245 struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0};
1246 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
1247 int rx_frm_cnt = 0, rx_byte_cnt = 0;
1248 struct enetc_bdr *tx_ring;
1252 cleaned_cnt = enetc_bd_unused(rx_ring);
1253 /* next descriptor to process */
1254 i = rx_ring->next_to_clean;
1256 while (likely(rx_frm_cnt < work_limit)) {
1257 union enetc_rx_bd *rxbd, *orig_rxbd;
1258 int orig_i, orig_cleaned_cnt;
1259 struct xdp_buff xdp_buff;
1260 struct sk_buff *skb;
1261 int tmp_orig_i, err;
1264 rxbd = enetc_rxbd(rx_ring, i);
1265 bd_status = le32_to_cpu(rxbd->r.lstatus);
1269 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
1270 dma_rmb(); /* for reading other rxbd fields */
1272 if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
1277 orig_cleaned_cnt = cleaned_cnt;
1280 enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
1281 &cleaned_cnt, &xdp_buff);
1283 xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
1287 bpf_warn_invalid_xdp_action(xdp_act);
1290 trace_xdp_exception(rx_ring->ndev, prog, xdp_act);
1293 enetc_xdp_drop(rx_ring, orig_i, i);
1297 cleaned_cnt = orig_cleaned_cnt;
1300 skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
1302 ENETC_RXB_DMA_SIZE_XDP);
1306 napi_gro_receive(napi, skb);
1309 tx_ring = priv->xdp_tx_ring[rx_ring->index];
1310 xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
1314 if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) {
1315 enetc_xdp_drop(rx_ring, orig_i, i);
1316 tx_ring->stats.xdp_tx_drops++;
1318 tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
1319 rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
1321 /* The XDP_TX enqueue was successful, so we
1322 * need to scrub the RX software BDs because
1323 * the ownership of the buffers no longer
1324 * belongs to the RX ring, and we must prevent
1325 * enetc_refill_rx_ring() from reusing
1328 while (orig_i != i) {
1329 rx_ring->rx_swbd[orig_i].page = NULL;
1330 enetc_bdr_idx_inc(rx_ring, &orig_i);
1335 /* xdp_return_frame does not support S/G in the sense
1336 * that it leaks the fragments: __xdp_return calls
1337 * page_frag_free only for the initial buffer.
1338 * Until XDP_REDIRECT gains support for S/G let's keep
1339 * the code structure in place, but dead. We drop the
1340 * S/G frames ourselves to avoid memory leaks which
1341 * would otherwise leave the kernel OOM.
1343 if (unlikely(cleaned_cnt - orig_cleaned_cnt != 1)) {
1344 enetc_xdp_drop(rx_ring, orig_i, i);
1345 rx_ring->stats.xdp_redirect_sg++;
1349 tmp_orig_i = orig_i;
1351 while (orig_i != i) {
1352 enetc_flip_rx_buff(rx_ring,
1353 &rx_ring->rx_swbd[orig_i]);
1354 enetc_bdr_idx_inc(rx_ring, &orig_i);
1357 err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
1358 if (unlikely(err)) {
1359 enetc_xdp_free(rx_ring, tmp_orig_i, i);
1361 xdp_redirect_frm_cnt++;
1362 rx_ring->stats.xdp_redirect++;
1370 rx_ring->next_to_clean = i;
1372 rx_ring->stats.packets += rx_frm_cnt;
1373 rx_ring->stats.bytes += rx_byte_cnt;
1375 if (xdp_redirect_frm_cnt)
1379 enetc_update_tx_ring_tail(tx_ring);
1381 if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight)
1382 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
1383 rx_ring->xdp.xdp_tx_in_flight);
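/* Buffers loaned out to in-flight XDP_TX frames have not been returned to
 * the RX ring yet, so only the BDs that were actually consumed (unused BDs
 * minus xdp_tx_in_flight) are refilled here.
 */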
1388 static int enetc_poll(struct napi_struct *napi, int budget)
1390 struct enetc_int_vector
1391 *v = container_of(napi, struct enetc_int_vector, napi);
1392 struct enetc_bdr *rx_ring = &v->rx_ring;
1393 struct bpf_prog *prog;
1394 bool complete = true;
1400 for (i = 0; i < v->count_tx_rings; i++)
1401 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
1404 prog = rx_ring->xdp.prog;
1406 work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog);
1408 work_done = enetc_clean_rx_ring(rx_ring, napi, budget);
1409 if (work_done == budget)
1412 v->rx_napi_work = true;
1415 enetc_unlock_mdio();
1419 napi_complete_done(napi, work_done);
1421 if (likely(v->rx_dim_en))
1422 enetc_rx_net_dim(v);
1424 v->rx_napi_work = false;
1426 /* enable interrupts */
1427 enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
1429 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
1430 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
1433 enetc_unlock_mdio();
1438 /* Probing and Init */
1439 #define ENETC_MAX_RFS_SIZE 64
1440 void enetc_get_si_caps(struct enetc_si *si)
1442 struct enetc_hw *hw = &si->hw;
1445 /* find out how many of various resources we have to work with */
1446 val = enetc_rd(hw, ENETC_SICAPR0);
1447 si->num_rx_rings = (val >> 16) & 0xff;
1448 si->num_tx_rings = val & 0xff;
1450 val = enetc_rd(hw, ENETC_SIRFSCAPR);
1451 si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
1452 si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
1455 val = enetc_rd(hw, ENETC_SIPCAPR0);
1456 if (val & ENETC_SIPCAPR0_RSS) {
1459 rss = enetc_rd(hw, ENETC_SIRSSCAPR);
1460 si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
1463 if (val & ENETC_SIPCAPR0_QBV)
1464 si->hw_features |= ENETC_SI_F_QBV;
1466 if (val & ENETC_SIPCAPR0_PSFP)
1467 si->hw_features |= ENETC_SI_F_PSFP;
1470 static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
1472 r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
1473 &r->bd_dma_base, GFP_KERNEL);
1477 /* h/w requires 128B alignment */
1478 if (!IS_ALIGNED(r->bd_dma_base, 128)) {
1479 dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
1487 static int enetc_alloc_txbdr(struct enetc_bdr *txr)
1491 txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
1495 err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
1497 vfree(txr->tx_swbd);
1501 txr->next_to_clean = 0;
1502 txr->next_to_use = 0;
1507 static void enetc_free_txbdr(struct enetc_bdr *txr)
1511 for (i = 0; i < txr->bd_count; i++)
1512 enetc_free_tx_frame(txr, &txr->tx_swbd[i]);
1514 size = txr->bd_count * sizeof(union enetc_tx_bd);
1516 dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
1517 txr->bd_base = NULL;
1519 vfree(txr->tx_swbd);
1520 txr->tx_swbd = NULL;
1523 static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
1527 for (i = 0; i < priv->num_tx_rings; i++) {
1528 err = enetc_alloc_txbdr(priv->tx_ring[i]);
1538 enetc_free_txbdr(priv->tx_ring[i]);
1543 static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
1547 for (i = 0; i < priv->num_tx_rings; i++)
1548 enetc_free_txbdr(priv->tx_ring[i]);
1551 static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
1553 size_t size = sizeof(union enetc_rx_bd);
1556 rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
1563 err = enetc_dma_alloc_bdr(rxr, size);
1565 vfree(rxr->rx_swbd);
1569 rxr->next_to_clean = 0;
1570 rxr->next_to_use = 0;
1571 rxr->next_to_alloc = 0;
1572 rxr->ext_en = extended;
1577 static void enetc_free_rxbdr(struct enetc_bdr *rxr)
1581 size = rxr->bd_count * sizeof(union enetc_rx_bd);
1583 dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
1584 rxr->bd_base = NULL;
1586 vfree(rxr->rx_swbd);
1587 rxr->rx_swbd = NULL;
1590 static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
1592 bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
1595 for (i = 0; i < priv->num_rx_rings; i++) {
1596 err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);
1606 enetc_free_rxbdr(priv->rx_ring[i]);
1611 static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
1615 for (i = 0; i < priv->num_rx_rings; i++)
1616 enetc_free_rxbdr(priv->rx_ring[i]);
1619 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
1623 if (!tx_ring->tx_swbd)
1626 for (i = 0; i < tx_ring->bd_count; i++) {
1627 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
1629 enetc_free_tx_frame(tx_ring, tx_swbd);
1632 tx_ring->next_to_clean = 0;
1633 tx_ring->next_to_use = 0;
1636 static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
1640 if (!rx_ring->rx_swbd)
1643 for (i = 0; i < rx_ring->bd_count; i++) {
1644 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
1649 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
1651 __free_page(rx_swbd->page);
1652 rx_swbd->page = NULL;
1655 rx_ring->next_to_clean = 0;
1656 rx_ring->next_to_use = 0;
1657 rx_ring->next_to_alloc = 0;
1660 static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
1664 for (i = 0; i < priv->num_rx_rings; i++)
1665 enetc_free_rx_ring(priv->rx_ring[i]);
1667 for (i = 0; i < priv->num_tx_rings; i++)
1668 enetc_free_tx_ring(priv->tx_ring[i]);
1671 static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
1676 rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
1680 /* Set up RSS table defaults */
1681 for (i = 0; i < si->num_rss; i++)
1682 rss_table[i] = i % num_groups;
1684 enetc_set_rss_table(si, rss_table, si->num_rss);
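/* The default table spreads RX rings round-robin across the indirection
 * entries, e.g. with 4 rings the pattern is 0,1,2,3,0,1,... so flows are
 * distributed evenly until the table is reconfigured (e.g. via ethtool).
 */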
1691 int enetc_configure_si(struct enetc_ndev_priv *priv)
1693 struct enetc_si *si = priv->si;
1694 struct enetc_hw *hw = &si->hw;
1697 /* set SI cache attributes */
1698 enetc_wr(hw, ENETC_SICAR0,
1699 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
1700 enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
1702 enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
1705 err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
1713 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
1715 struct enetc_si *si = priv->si;
1716 int cpus = num_online_cpus();
1718 priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE;
1719 priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE;
1721 /* Enable all available TX rings in order to configure as many
1722 * priorities as possible, when needed.
1723 * TODO: Make # of TX rings run-time configurable
1725 priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
1726 priv->num_tx_rings = si->num_tx_rings;
1727 priv->bdr_int_num = cpus;
1728 priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
1729 priv->tx_ictt = ENETC_TXIC_TIMETHR;
1732 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
1734 struct enetc_si *si = priv->si;
1736 priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
1738 if (!priv->cls_rules)
1744 void enetc_free_si_resources(struct enetc_ndev_priv *priv)
1746 kfree(priv->cls_rules);
1749 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
1751 int idx = tx_ring->index;
1754 enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
1755 lower_32_bits(tx_ring->bd_dma_base));
1757 enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
1758 upper_32_bits(tx_ring->bd_dma_base));
1760 WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
1761 enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
1762 ENETC_RTBLENR_LEN(tx_ring->bd_count));
1764 /* clearing PI/CI registers for Tx not supported, adjust sw indexes */
1765 tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
1766 tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
1768 /* enable Tx ints by setting pkt thr to 1 */
1769 enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
1771 tbmr = ENETC_TBMR_EN;
1772 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
1773 tbmr |= ENETC_TBMR_VIH;
1776 enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
1778 tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
1779 tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
1780 tx_ring->idr = hw->reg + ENETC_SITXIDR;
1783 static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
1785 int idx = rx_ring->index;
1788 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
1789 lower_32_bits(rx_ring->bd_dma_base));
1791 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
1792 upper_32_bits(rx_ring->bd_dma_base));
1794 WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
1795 enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
1796 ENETC_RTBLENR_LEN(rx_ring->bd_count));
1798 if (rx_ring->xdp.prog)
1799 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE_XDP);
1801 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
1803 enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
1805 /* enable Rx ints by setting pkt thr to 1 */
1806 enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
1808 rbmr = ENETC_RBMR_EN;
1810 if (rx_ring->ext_en)
1811 rbmr |= ENETC_RBMR_BDS;
1813 if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1814 rbmr |= ENETC_RBMR_VTE;
1816 rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
1817 rx_ring->idr = hw->reg + ENETC_SIRXIDR;
1820 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
1821 enetc_unlock_mdio();
1824 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
1827 static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
1831 for (i = 0; i < priv->num_tx_rings; i++)
1832 enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
1834 for (i = 0; i < priv->num_rx_rings; i++)
1835 enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
1838 static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
1840 int idx = rx_ring->index;
1842 /* disable EN bit on ring */
1843 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
1846 static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
1848 int delay = 8, timeout = 100;
1849 int idx = tx_ring->index;
1851 /* disable EN bit on ring */
1852 enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
1854 /* wait for busy to clear */
1855 while (delay < timeout &&
1856 enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
1861 if (delay >= timeout)
1862 netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
1866 static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
1870 for (i = 0; i < priv->num_tx_rings; i++)
1871 enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
1873 for (i = 0; i < priv->num_rx_rings; i++)
1874 enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
1879 static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
1881 struct pci_dev *pdev = priv->si->pdev;
1884 for (i = 0; i < priv->bdr_int_num; i++) {
1885 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1886 struct enetc_int_vector *v = priv->int_vector[i];
1887 int entry = ENETC_BDR_INT_BASE_IDX + i;
1888 struct enetc_hw *hw = &priv->si->hw;
1890 snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
1891 priv->ndev->name, i);
1892 err = request_irq(irq, enetc_msix, 0, v->name, v);
1894 dev_err(priv->dev, "request_irq() failed!\n");
1899 v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
1900 v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
1901 v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1);
1903 enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
1905 for (j = 0; j < v->count_tx_rings; j++) {
1906 int idx = v->tx_ring[j].index;
1908 enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
1910 irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));
1917 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1919 irq_set_affinity_hint(irq, NULL);
1920 free_irq(irq, priv->int_vector[i]);
1926 static void enetc_free_irqs(struct enetc_ndev_priv *priv)
1928 struct pci_dev *pdev = priv->si->pdev;
1931 for (i = 0; i < priv->bdr_int_num; i++) {
1932 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1934 irq_set_affinity_hint(irq, NULL);
1935 free_irq(irq, priv->int_vector[i]);
1939 static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
1941 struct enetc_hw *hw = &priv->si->hw;
1945 /* enable Tx & Rx event indication */
1947 (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) {
1948 icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR);
1949 /* init to non-0 minimum, will be adjusted later */
1952 icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */
1956 for (i = 0; i < priv->num_rx_rings; i++) {
1957 enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt);
1958 enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt);
1959 enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
1962 if (priv->ic_mode & ENETC_IC_TX_MANUAL)
1963 icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR);
1965 icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */
1967 for (i = 0; i < priv->num_tx_rings; i++) {
1968 enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt);
1969 enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt);
1970 enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE);
1974 static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
1978 for (i = 0; i < priv->num_tx_rings; i++)
1979 enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
1981 for (i = 0; i < priv->num_rx_rings; i++)
1982 enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
1985 static int enetc_phylink_connect(struct net_device *ndev)
1987 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1988 struct ethtool_eee edata;
1992 return 0; /* phy-less mode */
1994 err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
1996 dev_err(&ndev->dev, "could not attach to PHY\n");
2000 /* disable EEE autoneg, until ENETC driver supports it */
2001 memset(&edata, 0, sizeof(struct ethtool_eee));
2002 phylink_ethtool_set_eee(priv->phylink, &edata);
2007 static void enetc_tx_onestep_tstamp(struct work_struct *work)
2009 struct enetc_ndev_priv *priv;
2010 struct sk_buff *skb;
2012 priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
2014 netif_tx_lock(priv->ndev);
2016 clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
2017 skb = skb_dequeue(&priv->tx_skbs);
2019 enetc_start_xmit(skb, priv->ndev);
2021 netif_tx_unlock(priv->ndev);
2024 static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
2026 INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp);
2027 skb_queue_head_init(&priv->tx_skbs);
2030 void enetc_start(struct net_device *ndev)
2032 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2035 enetc_setup_interrupts(priv);
2037 for (i = 0; i < priv->bdr_int_num; i++) {
2038 int irq = pci_irq_vector(priv->si->pdev,
2039 ENETC_BDR_INT_BASE_IDX + i);
2041 napi_enable(&priv->int_vector[i]->napi);
2046 phylink_start(priv->phylink);
2048 netif_carrier_on(ndev);
2050 netif_tx_start_all_queues(ndev);
2053 int enetc_open(struct net_device *ndev)
2055 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2056 int num_stack_tx_queues;
2059 err = enetc_setup_irqs(priv);
2063 err = enetc_phylink_connect(ndev);
2065 goto err_phy_connect;
2067 err = enetc_alloc_tx_resources(priv);
2071 err = enetc_alloc_rx_resources(priv);
2075 num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
2077 err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
2079 goto err_set_queues;
2081 err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
2083 goto err_set_queues;
2085 enetc_tx_onestep_tstamp_init(priv);
2086 enetc_setup_bdrs(priv);
2092 enetc_free_rx_resources(priv);
2094 enetc_free_tx_resources(priv);
2097 phylink_disconnect_phy(priv->phylink);
2099 enetc_free_irqs(priv);
2104 void enetc_stop(struct net_device *ndev)
2106 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2109 netif_tx_stop_all_queues(ndev);
2111 for (i = 0; i < priv->bdr_int_num; i++) {
2112 int irq = pci_irq_vector(priv->si->pdev,
2113 ENETC_BDR_INT_BASE_IDX + i);
2116 napi_synchronize(&priv->int_vector[i]->napi);
2117 napi_disable(&priv->int_vector[i]->napi);
2121 phylink_stop(priv->phylink);
2123 netif_carrier_off(ndev);
2125 enetc_clear_interrupts(priv);
2128 int enetc_close(struct net_device *ndev)
2130 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2133 enetc_clear_bdrs(priv);
2136 phylink_disconnect_phy(priv->phylink);
2137 enetc_free_rxtx_rings(priv);
2138 enetc_free_rx_resources(priv);
2139 enetc_free_tx_resources(priv);
2140 enetc_free_irqs(priv);
2145 static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
2147 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2148 struct tc_mqprio_qopt *mqprio = type_data;
2149 struct enetc_bdr *tx_ring;
2150 int num_stack_tx_queues;
2154 num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
2155 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
2156 num_tc = mqprio->num_tc;
2159 netdev_reset_tc(ndev);
2160 netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
2162 /* Reset all ring priorities to 0 */
2163 for (i = 0; i < priv->num_tx_rings; i++) {
2164 tx_ring = priv->tx_ring[i];
2165 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
2171 /* Check if we have enough BD rings available to accommodate all TCs */
2172 if (num_tc > num_stack_tx_queues) {
2173 netdev_err(ndev, "Max %d traffic classes supported\n",
2174 num_stack_tx_queues);
2178 /* For the moment, we use only one BD ring per TC.
2180 * Configure num_tc BD rings with increasing priorities.
2182 for (i = 0; i < num_tc; i++) {
2183 tx_ring = priv->tx_ring[i];
2184 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
2187 /* Reset the number of netdev queues based on the TC count */
2188 netif_set_real_num_tx_queues(ndev, num_tc);
2190 netdev_set_num_tc(ndev, num_tc);
2192 /* Each TC is associated with one netdev queue */
2193 for (i = 0; i < num_tc; i++)
2194 netdev_set_tc_queue(ndev, i, 1, i);
2199 int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
2203 case TC_SETUP_QDISC_MQPRIO:
2204 return enetc_setup_tc_mqprio(ndev, type_data);
2205 case TC_SETUP_QDISC_TAPRIO:
2206 return enetc_setup_tc_taprio(ndev, type_data);
2207 case TC_SETUP_QDISC_CBS:
2208 return enetc_setup_tc_cbs(ndev, type_data);
2209 case TC_SETUP_QDISC_ETF:
2210 return enetc_setup_tc_txtime(ndev, type_data);
2211 case TC_SETUP_BLOCK:
2212 return enetc_setup_tc_psfp(ndev, type_data);
2218 static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
2219 struct netlink_ext_ack *extack)
2221 struct enetc_ndev_priv *priv = netdev_priv(dev);
2222 struct bpf_prog *old_prog;
2226 /* The buffer layout is changing, so we need to drain the old
2227 * RX buffers and seed new ones.
2229 is_up = netif_running(dev);
2233 old_prog = xchg(&priv->xdp_prog, prog);
2235 bpf_prog_put(old_prog);
2237 for (i = 0; i < priv->num_rx_rings; i++) {
2238 struct enetc_bdr *rx_ring = priv->rx_ring[i];
2240 rx_ring->xdp.prog = prog;
2243 rx_ring->buffer_offset = XDP_PACKET_HEADROOM;
2245 rx_ring->buffer_offset = ENETC_RXB_PAD;
2249 return dev_open(dev, extack);
2254 int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp)
2256 switch (xdp->command) {
2257 case XDP_SETUP_PROG:
2258 return enetc_setup_xdp_prog(dev, xdp->prog, xdp->extack);
2266 struct net_device_stats *enetc_get_stats(struct net_device *ndev)
2268 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2269 struct net_device_stats *stats = &ndev->stats;
2270 unsigned long packets = 0, bytes = 0;
2273 for (i = 0; i < priv->num_rx_rings; i++) {
2274 packets += priv->rx_ring[i]->stats.packets;
2275 bytes += priv->rx_ring[i]->stats.bytes;
2278 stats->rx_packets = packets;
2279 stats->rx_bytes = bytes;
2283 for (i = 0; i < priv->num_tx_rings; i++) {
2284 packets += priv->tx_ring[i]->stats.packets;
2285 bytes += priv->tx_ring[i]->stats.bytes;
2288 stats->tx_packets = packets;
2289 stats->tx_bytes = bytes;
2294 static int enetc_set_rss(struct net_device *ndev, int en)
2296 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2297 struct enetc_hw *hw = &priv->si->hw;
2300 enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
2302 reg = enetc_rd(hw, ENETC_SIMR);
2303 reg &= ~ENETC_SIMR_RSSE;
2304 reg |= (en) ? ENETC_SIMR_RSSE : 0;
2305 enetc_wr(hw, ENETC_SIMR, reg);
2310 static int enetc_set_psfp(struct net_device *ndev, int en)
2312 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2316 err = enetc_psfp_enable(priv);
2320 priv->active_offloads |= ENETC_F_QCI;
2324 err = enetc_psfp_disable(priv);
2328 priv->active_offloads &= ~ENETC_F_QCI;
2333 static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
2335 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2338 for (i = 0; i < priv->num_rx_rings; i++)
2339 enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
2342 static void enetc_enable_txvlan(struct net_device *ndev, bool en)
2344 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2347 for (i = 0; i < priv->num_tx_rings; i++)
2348 enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
2351 int enetc_set_features(struct net_device *ndev,
2352 netdev_features_t features)
2354 netdev_features_t changed = ndev->features ^ features;
2357 if (changed & NETIF_F_RXHASH)
2358 enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
2360 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2361 enetc_enable_rxvlan(ndev,
2362 !!(features & NETIF_F_HW_VLAN_CTAG_RX));
2364 if (changed & NETIF_F_HW_VLAN_CTAG_TX)
2365 enetc_enable_txvlan(ndev,
2366 !!(features & NETIF_F_HW_VLAN_CTAG_TX));
2368 if (changed & NETIF_F_HW_TC)
2369 err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
2374 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
2375 static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
2377 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2378 struct hwtstamp_config config;
2381 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2384 switch (config.tx_type) {
2385 case HWTSTAMP_TX_OFF:
2386 priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
2388 case HWTSTAMP_TX_ON:
2389 priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
2390 priv->active_offloads |= ENETC_F_TX_TSTAMP;
2392 case HWTSTAMP_TX_ONESTEP_SYNC:
2393 priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
2394 priv->active_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
2400 ao = priv->active_offloads;
2401 switch (config.rx_filter) {
2402 case HWTSTAMP_FILTER_NONE:
2403 priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
2406 priv->active_offloads |= ENETC_F_RX_TSTAMP;
2407 config.rx_filter = HWTSTAMP_FILTER_ALL;
2410 if (netif_running(ndev) && ao != priv->active_offloads) {
2415 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2419 static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
2421 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2422 struct hwtstamp_config config;
2426 if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
2427 config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
2428 else if (priv->active_offloads & ENETC_F_TX_TSTAMP)
2429 config.tx_type = HWTSTAMP_TX_ON;
2431 config.tx_type = HWTSTAMP_TX_OFF;
2433 config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
2434 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
2436 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2441 int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2443 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2444 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
2445 if (cmd == SIOCSHWTSTAMP)
2446 return enetc_hwtstamp_set(ndev, rq);
2447 if (cmd == SIOCGHWTSTAMP)
2448 return enetc_hwtstamp_get(ndev, rq);
2454 return phylink_mii_ioctl(priv->phylink, rq, cmd);
2457 int enetc_alloc_msix(struct enetc_ndev_priv *priv)
2459 struct pci_dev *pdev = priv->si->pdev;
2460 int first_xdp_tx_ring;
2461 int i, n, err, nvec;
2464 nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
2465 /* allocate MSIX for both messaging and Rx/Tx interrupts */
2466 n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
2474 /* # of tx rings per int vector */
2475 v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
2477 for (i = 0; i < priv->bdr_int_num; i++) {
2478 struct enetc_int_vector *v;
2479 struct enetc_bdr *bdr;
2482 v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
2488 priv->int_vector[i] = v;
2492 bdr->ndev = priv->ndev;
2493 bdr->dev = priv->dev;
2494 bdr->bd_count = priv->rx_bd_count;
2495 bdr->buffer_offset = ENETC_RXB_PAD;
2496 priv->rx_ring[i] = bdr;
2498 err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
2504 err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
2505 MEM_TYPE_PAGE_SHARED, NULL);
2507 xdp_rxq_info_unreg(&bdr->xdp.rxq);
2512 /* init defaults for adaptive IC */
2513 if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
2515 v->rx_dim_en = true;
2517 INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
2518 netif_napi_add(priv->ndev, &v->napi, enetc_poll,
2520 v->count_tx_rings = v_tx_rings;
2522 for (j = 0; j < v_tx_rings; j++) {
2525 /* default tx ring mapping policy */
2526 idx = priv->bdr_int_num * j + i;
2527 __set_bit(idx, &v->tx_rings_map);
2528 bdr = &v->tx_ring[j];
2530 bdr->ndev = priv->ndev;
2531 bdr->dev = priv->dev;
2532 bdr->bd_count = priv->tx_bd_count;
2533 priv->tx_ring[idx] = bdr;
2537 first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
2538 priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];
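/* The last num_possible_cpus() TX rings double as per-CPU XDP TX rings;
 * this is the inverse of the index math in enetc_rx_ring_from_xdp_tx_ring()
 * and of the stack queue count computed by enetc_num_stack_tx_queues().
 */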
2544 struct enetc_int_vector *v = priv->int_vector[i];
2545 struct enetc_bdr *rx_ring = &v->rx_ring;
2547 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
2548 xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
2549 netif_napi_del(&v->napi);
2550 cancel_work_sync(&v->rx_dim.work);
2554 pci_free_irq_vectors(pdev);
2559 void enetc_free_msix(struct enetc_ndev_priv *priv)
2563 for (i = 0; i < priv->bdr_int_num; i++) {
2564 struct enetc_int_vector *v = priv->int_vector[i];
2565 struct enetc_bdr *rx_ring = &v->rx_ring;
2567 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
2568 xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
2569 netif_napi_del(&v->napi);
2570 cancel_work_sync(&v->rx_dim.work);
2573 for (i = 0; i < priv->num_rx_rings; i++)
2574 priv->rx_ring[i] = NULL;
2576 for (i = 0; i < priv->num_tx_rings; i++)
2577 priv->tx_ring[i] = NULL;
2579 for (i = 0; i < priv->bdr_int_num; i++) {
2580 kfree(priv->int_vector[i]);
2581 priv->int_vector[i] = NULL;
2584 /* disable all MSIX for this device */
2585 pci_free_irq_vectors(priv->si->pdev);
2588 static void enetc_kfree_si(struct enetc_si *si)
2590 char *p = (char *)si - si->pad;
2595 static void enetc_detect_errata(struct enetc_si *si)
2597 if (si->pdev->revision == ENETC_REV1)
2598 si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP;
2601 int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
2603 struct enetc_si *si, *p;
2604 struct enetc_hw *hw;
2609 err = pci_enable_device_mem(pdev);
2611 dev_err(&pdev->dev, "device enable failed\n");
2615 /* set up for high or low dma */
2616 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2618 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2621 "DMA configuration failed: 0x%x\n", err);
2626 err = pci_request_mem_regions(pdev, name);
2628 dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
2629 goto err_pci_mem_reg;
2632 pci_set_master(pdev);
2634 alloc_size = sizeof(struct enetc_si);
2636 /* align priv to 32B */
2637 alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
2638 alloc_size += sizeof_priv;
2640 /* force 32B alignment for enetc_si */
2641 alloc_size += ENETC_SI_ALIGN - 1;
2643 p = kzalloc(alloc_size, GFP_KERNEL);
2649 si = PTR_ALIGN(p, ENETC_SI_ALIGN);
2650 si->pad = (char *)si - (char *)p;
2652 pci_set_drvdata(pdev, si);
2656 len = pci_resource_len(pdev, ENETC_BAR_REGS);
2657 hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
2660 dev_err(&pdev->dev, "ioremap() failed\n");
2663 if (len > ENETC_PORT_BASE)
2664 hw->port = hw->reg + ENETC_PORT_BASE;
2665 if (len > ENETC_GLOBAL_BASE)
2666 hw->global = hw->reg + ENETC_GLOBAL_BASE;
2668 enetc_detect_errata(si);
2675 pci_release_mem_regions(pdev);
2678 pci_disable_device(pdev);
2683 void enetc_pci_remove(struct pci_dev *pdev)
2685 struct enetc_si *si = pci_get_drvdata(pdev);
2686 struct enetc_hw *hw = &si->hw;
2690 pci_release_mem_regions(pdev);
2691 pci_disable_device(pdev);