1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2017-2019 NXP */
7 #include <linux/vmalloc.h>
8 #include <net/pkt_sched.h>
10 /* ENETC overhead: optional extension BD + 1 BD gap */
11 #define ENETC_TXBDS_NEEDED(val) ((val) + 2)
12 /* max # of chained Tx BDs is 15, including head and extension BD */
13 #define ENETC_MAX_SKB_FRAGS 13
14 #define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
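/* Worst case per frame: a head BD plus up to ENETC_MAX_SKB_FRAGS fragment
 * BDs, plus the optional extension BD and the one-BD gap, i.e.
 * ENETC_TXBDS_NEEDED(13 + 1) = 16 BDs.
 */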
16 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
17 int active_offloads);
19 netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
21 struct enetc_ndev_priv *priv = netdev_priv(ndev);
22 struct enetc_bdr *tx_ring;
25 tx_ring = priv->tx_ring[skb->queue_mapping];
27 if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
28 if (unlikely(skb_linearize(skb)))
31 count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
32 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
33 netif_stop_subqueue(ndev, tx_ring->index);
34 return NETDEV_TX_BUSY;
38 count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
44 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
45 netif_stop_subqueue(ndev, tx_ring->index);
50 dev_kfree_skb_any(skb);
54 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
55 struct enetc_tx_swbd *tx_swbd)
57 if (tx_swbd->is_dma_page)
58 dma_unmap_page(tx_ring->dev, tx_swbd->dma,
59 tx_swbd->len, DMA_TO_DEVICE);
60 else
61 dma_unmap_single(tx_ring->dev, tx_swbd->dma,
62 tx_swbd->len, DMA_TO_DEVICE);
66 static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
67 struct enetc_tx_swbd *tx_swbd)
70 enetc_unmap_tx_buff(tx_ring, tx_swbd);
73 dev_kfree_skb_any(tx_swbd->skb);
78 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
79 int active_offloads)
81 struct enetc_tx_swbd *tx_swbd;
83 int len = skb_headlen(skb);
84 union enetc_tx_bd temp_bd;
85 union enetc_tx_bd *txbd;
86 bool do_vlan, do_tstamp;
92 i = tx_ring->next_to_use;
93 txbd = ENETC_TXBD(*tx_ring, i);
96 dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
97 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
100 temp_bd.addr = cpu_to_le64(dma);
101 temp_bd.buf_len = cpu_to_le16(len);
104 tx_swbd = &tx_ring->tx_swbd[i];
107 tx_swbd->is_dma_page = 0;
110 do_vlan = skb_vlan_tag_present(skb);
111 do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) &&
112 (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP);
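/* check_wb marks the BD so that the cleanup path re-reads it in writeback
 * format and extracts the Tx timestamp from it
 */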
113 tx_swbd->do_tstamp = do_tstamp;
114 tx_swbd->check_wb = tx_swbd->do_tstamp;
116 if (do_vlan || do_tstamp)
117 flags |= ENETC_TXBD_FLAGS_EX;
119 if (tx_ring->tsd_enable)
120 flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
122 /* first BD needs frm_len and offload flags set */
123 temp_bd.frm_len = cpu_to_le16(skb->len);
124 temp_bd.flags = flags;
126 if (flags & ENETC_TXBD_FLAGS_TSE)
127 temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns,
128 flags);
130 if (flags & ENETC_TXBD_FLAGS_EX) {
133 enetc_clear_tx_bd(&temp_bd);
135 /* add extension BD for VLAN and/or timestamping */
140 if (unlikely(i == tx_ring->bd_count)) {
142 tx_swbd = tx_ring->tx_swbd;
143 txbd = ENETC_TXBD(*tx_ring, 0);
148 temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
149 temp_bd.ext.tpid = 0; /* 0 = C-TAG */
150 e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
154 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
155 e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
158 temp_bd.ext.e_flags = e_flags;
162 frag = &skb_shinfo(skb)->frags[0];
163 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
164 len = skb_frag_size(frag);
165 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
166 DMA_TO_DEVICE);
167 if (dma_mapping_error(tx_ring->dev, dma))
171 enetc_clear_tx_bd(&temp_bd);
177 if (unlikely(i == tx_ring->bd_count)) {
179 tx_swbd = tx_ring->tx_swbd;
180 txbd = ENETC_TXBD(*tx_ring, 0);
184 temp_bd.addr = cpu_to_le64(dma);
185 temp_bd.buf_len = cpu_to_le16(len);
189 tx_swbd->is_dma_page = 1;
193 /* last BD needs 'F' bit set */
194 flags |= ENETC_TXBD_FLAGS_F;
195 temp_bd.flags = flags;
198 tx_ring->tx_swbd[i].skb = skb;
200 enetc_bdr_idx_inc(tx_ring, &i);
201 tx_ring->next_to_use = i;
203 skb_tx_timestamp(skb);
205 /* let H/W know BD ring has been updated */
206 enetc_wr_reg_hot(tx_ring->tpir, i); /* includes wmb() */
211 dev_err(tx_ring->dev, "DMA map error\n");
214 tx_swbd = &tx_ring->tx_swbd[i];
215 enetc_free_tx_skb(tx_ring, tx_swbd);
217 i = tx_ring->bd_count;
224 static irqreturn_t enetc_msix(int irq, void *data)
226 struct enetc_int_vector *v = data;
231 /* disable interrupts */
232 enetc_wr_reg_hot(v->rbier, 0);
233 enetc_wr_reg_hot(v->ricr1, v->rx_ictt);
235 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
236 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);
240 napi_schedule(&v->napi);
245 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
246 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
247 struct napi_struct *napi, int work_limit);
249 static void enetc_rx_dim_work(struct work_struct *w)
251 struct dim *dim = container_of(w, struct dim, work);
252 struct dim_cq_moder moder =
253 net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
254 struct enetc_int_vector *v =
255 container_of(dim, struct enetc_int_vector, rx_dim);
257 v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
258 dim->state = DIM_START_MEASURE;
261 static void enetc_rx_net_dim(struct enetc_int_vector *v)
263 struct dim_sample dim_sample;
267 if (!v->rx_napi_work)
268 return;
270 dim_update_sample(v->comp_cnt,
271 v->rx_ring.stats.packets,
272 v->rx_ring.stats.bytes,
273 &dim_sample);
274 net_dim(&v->rx_dim, dim_sample);
277 static int enetc_poll(struct napi_struct *napi, int budget)
279 struct enetc_int_vector
280 *v = container_of(napi, struct enetc_int_vector, napi);
281 bool complete = true;
287 for (i = 0; i < v->count_tx_rings; i++)
288 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
289 complete = false;
291 work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
292 if (work_done == budget)
293 complete = false;
295 v->rx_napi_work = true;
302 napi_complete_done(napi, work_done);
304 if (likely(v->rx_dim_en))
305 enetc_rx_net_dim(v);
307 v->rx_napi_work = false;
309 /* enable interrupts */
310 enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
312 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
313 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
314 ENETC_TBIER_TXTIE);
321 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
323 int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
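/* modular distance from consumer to producer, e.g. on a 256-BD ring,
 * ci = 250 and pi = 4 mean 10 BDs are ready to be cleaned
 */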
325 return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
328 static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
329 u64 *tstamp)
331 u32 lo, hi, tstamp_lo;
333 lo = enetc_rd_hot(hw, ENETC_SICTR0);
334 hi = enetc_rd_hot(hw, ENETC_SICTR1);
335 tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
336 if (lo <= tstamp_lo)
337 hi -= 1;
338 *tstamp = (u64)hi << 32 | tstamp_lo;
341 static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
343 struct skb_shared_hwtstamps shhwtstamps;
345 if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
346 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
347 shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
348 skb_txtime_consumed(skb);
349 skb_tstamp_tx(skb, &shhwtstamps);
353 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
355 struct net_device *ndev = tx_ring->ndev;
356 int tx_frm_cnt = 0, tx_byte_cnt = 0;
357 struct enetc_tx_swbd *tx_swbd;
362 i = tx_ring->next_to_clean;
363 tx_swbd = &tx_ring->tx_swbd[i];
365 bds_to_clean = enetc_bd_ready_count(tx_ring, i);
369 while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
370 bool is_eof = !!tx_swbd->skb;
372 if (unlikely(tx_swbd->check_wb)) {
373 struct enetc_ndev_priv *priv = netdev_priv(ndev);
374 union enetc_tx_bd *txbd;
376 txbd = ENETC_TXBD(*tx_ring, i);
378 if (txbd->flags & ENETC_TXBD_FLAGS_W &&
379 tx_swbd->do_tstamp) {
380 enetc_get_tx_tstamp(&priv->si->hw, txbd,
381 &tstamp);
382 do_tstamp = true;
386 if (likely(tx_swbd->dma))
387 enetc_unmap_tx_buff(tx_ring, tx_swbd);
390 if (unlikely(do_tstamp)) {
391 enetc_tstamp_tx(tx_swbd->skb, tstamp);
394 napi_consume_skb(tx_swbd->skb, napi_budget);
398 tx_byte_cnt += tx_swbd->len;
403 if (unlikely(i == tx_ring->bd_count)) {
405 tx_swbd = tx_ring->tx_swbd;
408 /* BD iteration loop end */
411 /* re-arm interrupt source */
412 enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
413 BIT(16 + tx_ring->index));
416 if (unlikely(!bds_to_clean))
417 bds_to_clean = enetc_bd_ready_count(tx_ring, i);
420 tx_ring->next_to_clean = i;
421 tx_ring->stats.packets += tx_frm_cnt;
422 tx_ring->stats.bytes += tx_byte_cnt;
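/* if the queue was stopped for lack of BDs, restart it now that a
 * worst-case frame is guaranteed to fit again
 */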
424 if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
425 __netif_subqueue_stopped(ndev, tx_ring->index) &&
426 (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
427 netif_wake_subqueue(ndev, tx_ring->index);
430 return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
433 static bool enetc_new_page(struct enetc_bdr *rx_ring,
434 struct enetc_rx_swbd *rx_swbd)
439 page = dev_alloc_page();
443 addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
444 if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
451 rx_swbd->page = page;
452 rx_swbd->page_offset = ENETC_RXB_PAD;
457 static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
459 struct enetc_rx_swbd *rx_swbd;
460 union enetc_rx_bd *rxbd;
463 i = rx_ring->next_to_use;
464 rx_swbd = &rx_ring->rx_swbd[i];
465 rxbd = enetc_rxbd(rx_ring, i);
467 for (j = 0; j < buff_cnt; j++) {
469 if (unlikely(!rx_swbd->page)) {
470 if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
471 rx_ring->stats.rx_alloc_errs++;
477 rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
478 rx_swbd->page_offset);
479 /* clear 'R' as well */
482 enetc_rxbd_next(rx_ring, &rxbd, &i);
483 rx_swbd = &rx_ring->rx_swbd[i];
487 rx_ring->next_to_alloc = i; /* keep track for page reuse */
488 rx_ring->next_to_use = i;
494 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
495 static void enetc_get_rx_tstamp(struct net_device *ndev,
496 union enetc_rx_bd *rxbd,
497 struct sk_buff *skb)
499 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
500 struct enetc_ndev_priv *priv = netdev_priv(ndev);
501 struct enetc_hw *hw = &priv->si->hw;
502 u32 lo, hi, tstamp_lo;
505 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
506 lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
507 hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
508 rxbd = enetc_rxbd_ext(rxbd);
509 tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
510 if (lo <= tstamp_lo)
511 hi -= 1;
513 tstamp = (u64)hi << 32 | tstamp_lo;
514 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
515 shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
520 static void enetc_get_offloads(struct enetc_bdr *rx_ring,
521 union enetc_rx_bd *rxbd, struct sk_buff *skb)
523 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
526 if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
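/* the BD carries the ones' complement checksum of the frame; unfolding
 * its inverse yields the value CHECKSUM_COMPLETE expects
 */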
527 u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
529 skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
530 skb->ip_summed = CHECKSUM_COMPLETE;
533 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
536 switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
538 tpid = htons(ETH_P_8021Q);
541 tpid = htons(ETH_P_8021AD);
544 tpid = htons(enetc_port_rd(&priv->si->hw,
545 ENETC_PCVLANR1));
548 tpid = htons(enetc_port_rd(&priv->si->hw,
549 ENETC_PCVLANR2));
555 __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
558 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
559 if (priv->active_offloads & ENETC_F_RX_TSTAMP)
560 enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
564 static void enetc_process_skb(struct enetc_bdr *rx_ring,
565 struct sk_buff *skb)
567 skb_record_rx_queue(skb, rx_ring->index);
568 skb->protocol = eth_type_trans(skb, rx_ring->ndev);
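/* Rx buffers are half-page slices of DMA-mapped pages. A page can be
 * recycled when we hold the only reference and it is not pfmemalloc
 * backed; enetc_put_rx_buff() then flips page_offset between the two
 * halves, so the device fills one half while the stack reads the other.
 */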
571 static bool enetc_page_reusable(struct page *page)
573 return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
576 static void enetc_reuse_page(struct enetc_bdr *rx_ring,
577 struct enetc_rx_swbd *old)
579 struct enetc_rx_swbd *new;
581 new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
583 /* next buf that may reuse a page */
584 enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
586 /* copy page reference */
590 static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
591 int i, u16 size)
593 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
595 dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
596 rx_swbd->page_offset,
597 size, DMA_FROM_DEVICE);
601 static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
602 struct enetc_rx_swbd *rx_swbd)
604 if (likely(enetc_page_reusable(rx_swbd->page))) {
605 rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
606 page_ref_inc(rx_swbd->page);
608 enetc_reuse_page(rx_ring, rx_swbd);
610 /* sync for use by the device */
611 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
612 rx_swbd->page_offset,
613 ENETC_RXB_DMA_SIZE,
614 DMA_FROM_DEVICE);
615 } else {
616 dma_unmap_page(rx_ring->dev, rx_swbd->dma,
617 PAGE_SIZE, DMA_FROM_DEVICE);
620 rx_swbd->page = NULL;
623 static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
624 int i, u16 size)
626 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
630 ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
631 skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
632 if (unlikely(!skb)) {
633 rx_ring->stats.rx_alloc_errs++;
637 skb_reserve(skb, ENETC_RXB_PAD);
638 __skb_put(skb, size);
640 enetc_put_rx_buff(rx_ring, rx_swbd);
645 static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
646 u16 size, struct sk_buff *skb)
648 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
650 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
651 rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
653 enetc_put_rx_buff(rx_ring, rx_swbd);
656 #define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
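/* Refills are batched: the ring is topped up and the consumer index
 * written back only once at least ENETC_RXBD_BUNDLE BDs have been
 * consumed, limiting MMIO writes on the hot path.
 */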
658 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
659 struct napi_struct *napi, int work_limit)
661 int rx_frm_cnt = 0, rx_byte_cnt = 0;
664 cleaned_cnt = enetc_bd_unused(rx_ring);
665 /* next descriptor to process */
666 i = rx_ring->next_to_clean;
668 while (likely(rx_frm_cnt < work_limit)) {
669 union enetc_rx_bd *rxbd;
674 if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
675 int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
677 /* update ENETC's consumer index */
678 enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
679 cleaned_cnt -= count;
682 rxbd = enetc_rxbd(rx_ring, i);
683 bd_status = le32_to_cpu(rxbd->r.lstatus);
687 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
688 dma_rmb(); /* for reading other rxbd fields */
689 size = le16_to_cpu(rxbd->r.buf_len);
690 skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
694 enetc_get_offloads(rx_ring, rxbd, skb);
698 enetc_rxbd_next(rx_ring, &rxbd, &i);
700 if (unlikely(bd_status &
701 ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
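/* bad frame: walk to its last BD, dropping its buffers along the way,
 * then account the error
 */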
703 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
705 bd_status = le32_to_cpu(rxbd->r.lstatus);
707 enetc_rxbd_next(rx_ring, &rxbd, &i);
710 rx_ring->ndev->stats.rx_dropped++;
711 rx_ring->ndev->stats.rx_errors++;
716 /* not last BD in frame? */
717 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
718 bd_status = le32_to_cpu(rxbd->r.lstatus);
719 size = ENETC_RXB_DMA_SIZE;
721 if (bd_status & ENETC_RXBD_LSTATUS_F) {
723 size = le16_to_cpu(rxbd->r.buf_len);
726 enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
730 enetc_rxbd_next(rx_ring, &rxbd, &i);
733 rx_byte_cnt += skb->len;
735 enetc_process_skb(rx_ring, skb);
737 napi_gro_receive(napi, skb);
742 rx_ring->next_to_clean = i;
744 rx_ring->stats.packets += rx_frm_cnt;
745 rx_ring->stats.bytes += rx_byte_cnt;
750 /* Probing and Init */
751 #define ENETC_MAX_RFS_SIZE 64
752 void enetc_get_si_caps(struct enetc_si *si)
754 struct enetc_hw *hw = &si->hw;
757 /* find out how many of each resource we have to work with */
758 val = enetc_rd(hw, ENETC_SICAPR0);
759 si->num_rx_rings = (val >> 16) & 0xff;
760 si->num_tx_rings = val & 0xff;
762 val = enetc_rd(hw, ENETC_SIRFSCAPR);
763 si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
764 si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
767 val = enetc_rd(hw, ENETC_SIPCAPR0);
768 if (val & ENETC_SIPCAPR0_RSS) {
771 rss = enetc_rd(hw, ENETC_SIRSSCAPR);
772 si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
775 if (val & ENETC_SIPCAPR0_QBV)
776 si->hw_features |= ENETC_SI_F_QBV;
778 if (val & ENETC_SIPCAPR0_PSFP)
779 si->hw_features |= ENETC_SI_F_PSFP;
782 static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
784 r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
785 &r->bd_dma_base, GFP_KERNEL);
789 /* h/w requires 128B alignment */
790 if (!IS_ALIGNED(r->bd_dma_base, 128)) {
791 dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
792 r->bd_dma_base);
793 return -EINVAL;
799 static int enetc_alloc_txbdr(struct enetc_bdr *txr)
803 txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
807 err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
813 txr->next_to_clean = 0;
814 txr->next_to_use = 0;
819 static void enetc_free_txbdr(struct enetc_bdr *txr)
823 for (i = 0; i < txr->bd_count; i++)
824 enetc_free_tx_skb(txr, &txr->tx_swbd[i]);
826 size = txr->bd_count * sizeof(union enetc_tx_bd);
828 dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
835 static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
839 for (i = 0; i < priv->num_tx_rings; i++) {
840 err = enetc_alloc_txbdr(priv->tx_ring[i]);
850 enetc_free_txbdr(priv->tx_ring[i]);
855 static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
859 for (i = 0; i < priv->num_tx_rings; i++)
860 enetc_free_txbdr(priv->tx_ring[i]);
863 static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
865 size_t size = sizeof(union enetc_rx_bd);
868 rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
875 err = enetc_dma_alloc_bdr(rxr, size);
881 rxr->next_to_clean = 0;
882 rxr->next_to_use = 0;
883 rxr->next_to_alloc = 0;
884 rxr->ext_en = extended;
889 static void enetc_free_rxbdr(struct enetc_bdr *rxr)
893 size = rxr->bd_count * sizeof(union enetc_rx_bd);
895 dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
902 static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
904 bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
907 for (i = 0; i < priv->num_rx_rings; i++) {
908 err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);
918 enetc_free_rxbdr(priv->rx_ring[i]);
923 static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
927 for (i = 0; i < priv->num_rx_rings; i++)
928 enetc_free_rxbdr(priv->rx_ring[i]);
931 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
935 if (!tx_ring->tx_swbd)
938 for (i = 0; i < tx_ring->bd_count; i++) {
939 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
941 enetc_free_tx_skb(tx_ring, tx_swbd);
944 tx_ring->next_to_clean = 0;
945 tx_ring->next_to_use = 0;
948 static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
952 if (!rx_ring->rx_swbd)
955 for (i = 0; i < rx_ring->bd_count; i++) {
956 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
961 dma_unmap_page(rx_ring->dev, rx_swbd->dma,
962 PAGE_SIZE, DMA_FROM_DEVICE);
963 __free_page(rx_swbd->page);
964 rx_swbd->page = NULL;
967 rx_ring->next_to_clean = 0;
968 rx_ring->next_to_use = 0;
969 rx_ring->next_to_alloc = 0;
972 static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
976 for (i = 0; i < priv->num_rx_rings; i++)
977 enetc_free_rx_ring(priv->rx_ring[i]);
979 for (i = 0; i < priv->num_tx_rings; i++)
980 enetc_free_tx_ring(priv->tx_ring[i]);
983 static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
988 rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
992 /* Set up RSS table defaults */
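/* e.g. 64 entries and 2 Rx rings yield 0,1,0,1,... so flows are spread
 * evenly across the rings
 */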
993 for (i = 0; i < si->num_rss; i++)
994 rss_table[i] = i % num_groups;
996 enetc_set_rss_table(si, rss_table, si->num_rss);
1003 int enetc_configure_si(struct enetc_ndev_priv *priv)
1005 struct enetc_si *si = priv->si;
1006 struct enetc_hw *hw = &si->hw;
1009 /* set SI cache attributes */
1010 enetc_wr(hw, ENETC_SICAR0,
1011 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
1012 enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
1014 enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
1017 err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
1025 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
1027 struct enetc_si *si = priv->si;
1028 int cpus = num_online_cpus();
1030 priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE;
1031 priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE;
1033 /* Enable all available TX rings in order to configure as many
1034 * priorities as possible, when needed.
1035 * TODO: Make # of TX rings run-time configurable
1036 */
1037 priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
1038 priv->num_tx_rings = si->num_tx_rings;
1039 priv->bdr_int_num = cpus;
1040 priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
1041 priv->tx_ictt = ENETC_TXIC_TIMETHR;
1044 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
1046 struct enetc_si *si = priv->si;
1048 priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
1049 GFP_KERNEL);
1050 if (!priv->cls_rules)
1056 void enetc_free_si_resources(struct enetc_ndev_priv *priv)
1058 kfree(priv->cls_rules);
1061 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
1063 int idx = tx_ring->index;
1066 enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
1067 lower_32_bits(tx_ring->bd_dma_base));
1069 enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
1070 upper_32_bits(tx_ring->bd_dma_base));
1072 WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
1073 enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
1074 ENETC_RTBLENR_LEN(tx_ring->bd_count));
1076 /* clearing PI/CI registers for Tx not supported, adjust sw indexes */
1077 tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
1078 tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
1080 /* enable Tx ints by setting pkt thr to 1 */
1081 enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
1083 tbmr = ENETC_TBMR_EN;
1084 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
1085 tbmr |= ENETC_TBMR_VIH;
1088 enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
1090 tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
1091 tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
1092 tx_ring->idr = hw->reg + ENETC_SITXIDR;
1095 static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
1097 int idx = rx_ring->index;
1100 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
1101 lower_32_bits(rx_ring->bd_dma_base));
1103 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
1104 upper_32_bits(rx_ring->bd_dma_base));
1106 WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
1107 enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
1108 ENETC_RTBLENR_LEN(rx_ring->bd_count));
1110 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
1112 enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
1114 /* enable Rx ints by setting pkt thr to 1 */
1115 enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
1117 rbmr = ENETC_RBMR_EN;
1119 if (rx_ring->ext_en)
1120 rbmr |= ENETC_RBMR_BDS;
1122 if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1123 rbmr |= ENETC_RBMR_VTE;
1125 rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
1126 rx_ring->idr = hw->reg + ENETC_SIRXIDR;
1128 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
1129 /* update ENETC's consumer index */
1130 enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, rx_ring->next_to_use);
1133 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
1136 static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
1140 for (i = 0; i < priv->num_tx_rings; i++)
1141 enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
1143 for (i = 0; i < priv->num_rx_rings; i++)
1144 enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
1147 static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
1149 int idx = rx_ring->index;
1151 /* disable EN bit on ring */
1152 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
1155 static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
1157 int delay = 8, timeout = 100;
1158 int idx = tx_ring->index;
1160 /* disable EN bit on ring */
1161 enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
1163 /* wait for busy to clear */
1164 while (delay < timeout &&
1165 enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
1170 if (delay >= timeout)
1171 netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
1172 idx);
1175 static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
1179 for (i = 0; i < priv->num_tx_rings; i++)
1180 enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
1182 for (i = 0; i < priv->num_rx_rings; i++)
1183 enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
1188 static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
1190 struct pci_dev *pdev = priv->si->pdev;
1194 for (i = 0; i < priv->bdr_int_num; i++) {
1195 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1196 struct enetc_int_vector *v = priv->int_vector[i];
1197 int entry = ENETC_BDR_INT_BASE_IDX + i;
1198 struct enetc_hw *hw = &priv->si->hw;
1200 snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
1201 priv->ndev->name, i);
1202 err = request_irq(irq, enetc_msix, 0, v->name, v);
1204 dev_err(priv->dev, "request_irq() failed!\n");
1209 v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
1210 v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
1211 v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1);
1213 enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
1215 for (j = 0; j < v->count_tx_rings; j++) {
1216 int idx = v->tx_ring[j].index;
1218 enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
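/* give each vector its own CPU, round-robin over the online ones, so
 * each vector's rings tend to be processed on a single CPU
 */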
1220 cpumask_clear(&cpu_mask);
1221 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
1222 irq_set_affinity_hint(irq, &cpu_mask);
1229 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1231 irq_set_affinity_hint(irq, NULL);
1232 free_irq(irq, priv->int_vector[i]);
1238 static void enetc_free_irqs(struct enetc_ndev_priv *priv)
1240 struct pci_dev *pdev = priv->si->pdev;
1243 for (i = 0; i < priv->bdr_int_num; i++) {
1244 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1246 irq_set_affinity_hint(irq, NULL);
1247 free_irq(irq, priv->int_vector[i]);
1251 static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
1253 struct enetc_hw *hw = &priv->si->hw;
1257 /* enable Tx & Rx event indication */
1258 if (priv->ic_mode &
1259 (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) {
1260 icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR);
1261 /* init to non-0 minimum, will be adjusted later */
1262 ictt = 0x1;
1263 } else {
1264 icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */
1268 for (i = 0; i < priv->num_rx_rings; i++) {
1269 enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt);
1270 enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt);
1271 enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
1274 if (priv->ic_mode & ENETC_IC_TX_MANUAL)
1275 icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR);
1276 else
1277 icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */
1279 for (i = 0; i < priv->num_tx_rings; i++) {
1280 enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt);
1281 enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt);
1282 enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE);
1286 static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
1290 for (i = 0; i < priv->num_tx_rings; i++)
1291 enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
1293 for (i = 0; i < priv->num_rx_rings; i++)
1294 enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
1297 static int enetc_phylink_connect(struct net_device *ndev)
1299 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1300 struct ethtool_eee edata;
1303 if (!priv->phylink)
1304 return 0; /* phy-less mode */
1306 err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
1308 dev_err(&ndev->dev, "could not attach to PHY\n");
1312 /* disable EEE autoneg, until ENETC driver supports it */
1313 memset(&edata, 0, sizeof(struct ethtool_eee));
1314 phylink_ethtool_set_eee(priv->phylink, &edata);
1319 void enetc_start(struct net_device *ndev)
1321 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1324 enetc_setup_interrupts(priv);
1326 for (i = 0; i < priv->bdr_int_num; i++) {
1327 int irq = pci_irq_vector(priv->si->pdev,
1328 ENETC_BDR_INT_BASE_IDX + i);
1330 napi_enable(&priv->int_vector[i]->napi);
1335 phylink_start(priv->phylink);
1337 netif_carrier_on(ndev);
1339 netif_tx_start_all_queues(ndev);
1342 int enetc_open(struct net_device *ndev)
1344 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1347 err = enetc_setup_irqs(priv);
1351 err = enetc_phylink_connect(ndev);
1353 goto err_phy_connect;
1355 err = enetc_alloc_tx_resources(priv);
1359 err = enetc_alloc_rx_resources(priv);
1363 err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
1365 goto err_set_queues;
1367 err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
1369 goto err_set_queues;
1371 enetc_setup_bdrs(priv);
1377 enetc_free_rx_resources(priv);
1379 enetc_free_tx_resources(priv);
1382 phylink_disconnect_phy(priv->phylink);
1384 enetc_free_irqs(priv);
1389 void enetc_stop(struct net_device *ndev)
1391 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1394 netif_tx_stop_all_queues(ndev);
1396 for (i = 0; i < priv->bdr_int_num; i++) {
1397 int irq = pci_irq_vector(priv->si->pdev,
1398 ENETC_BDR_INT_BASE_IDX + i);
1401 napi_synchronize(&priv->int_vector[i]->napi);
1402 napi_disable(&priv->int_vector[i]->napi);
1406 phylink_stop(priv->phylink);
1408 netif_carrier_off(ndev);
1410 enetc_clear_interrupts(priv);
1413 int enetc_close(struct net_device *ndev)
1415 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1418 enetc_clear_bdrs(priv);
1421 phylink_disconnect_phy(priv->phylink);
1422 enetc_free_rxtx_rings(priv);
1423 enetc_free_rx_resources(priv);
1424 enetc_free_tx_resources(priv);
1425 enetc_free_irqs(priv);
1430 static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
1432 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1433 struct tc_mqprio_qopt *mqprio = type_data;
1434 struct enetc_bdr *tx_ring;
1438 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
1439 num_tc = mqprio->num_tc;
1442 netdev_reset_tc(ndev);
1443 netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
1445 /* Reset all ring priorities to 0 */
1446 for (i = 0; i < priv->num_tx_rings; i++) {
1447 tx_ring = priv->tx_ring[i];
1448 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
1454 /* Check if we have enough BD rings available to accommodate all TCs */
1455 if (num_tc > priv->num_tx_rings) {
1456 netdev_err(ndev, "Max %d traffic classes supported\n",
1457 priv->num_tx_rings);
1461 /* For the moment, we use only one BD ring per TC.
1463 * Configure num_tc BD rings with increasing priorities.
1464 */
1465 for (i = 0; i < num_tc; i++) {
1466 tx_ring = priv->tx_ring[i];
1467 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
1470 /* Reset the number of netdev queues based on the TC count */
1471 netif_set_real_num_tx_queues(ndev, num_tc);
1473 netdev_set_num_tc(ndev, num_tc);
1475 /* Each TC is associated with one netdev queue */
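/* e.g. num_tc = 3: queue 0 -> TC 0, queue 1 -> TC 1, queue 2 -> TC 2 */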
1476 for (i = 0; i < num_tc; i++)
1477 netdev_set_tc_queue(ndev, i, 1, i);
1482 int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
1483 void *type_data)
1485 switch (type) {
1486 case TC_SETUP_QDISC_MQPRIO:
1487 return enetc_setup_tc_mqprio(ndev, type_data);
1488 case TC_SETUP_QDISC_TAPRIO:
1489 return enetc_setup_tc_taprio(ndev, type_data);
1490 case TC_SETUP_QDISC_CBS:
1491 return enetc_setup_tc_cbs(ndev, type_data);
1492 case TC_SETUP_QDISC_ETF:
1493 return enetc_setup_tc_txtime(ndev, type_data);
1494 case TC_SETUP_BLOCK:
1495 return enetc_setup_tc_psfp(ndev, type_data);
1501 struct net_device_stats *enetc_get_stats(struct net_device *ndev)
1503 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1504 struct net_device_stats *stats = &ndev->stats;
1505 unsigned long packets = 0, bytes = 0;
1508 for (i = 0; i < priv->num_rx_rings; i++) {
1509 packets += priv->rx_ring[i]->stats.packets;
1510 bytes += priv->rx_ring[i]->stats.bytes;
1513 stats->rx_packets = packets;
1514 stats->rx_bytes = bytes;
1518 for (i = 0; i < priv->num_tx_rings; i++) {
1519 packets += priv->tx_ring[i]->stats.packets;
1520 bytes += priv->tx_ring[i]->stats.bytes;
1523 stats->tx_packets = packets;
1524 stats->tx_bytes = bytes;
1529 static int enetc_set_rss(struct net_device *ndev, int en)
1531 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1532 struct enetc_hw *hw = &priv->si->hw;
1535 enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
1537 reg = enetc_rd(hw, ENETC_SIMR);
1538 reg &= ~ENETC_SIMR_RSSE;
1539 reg |= (en) ? ENETC_SIMR_RSSE : 0;
1540 enetc_wr(hw, ENETC_SIMR, reg);
1545 static int enetc_set_psfp(struct net_device *ndev, int en)
1547 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1551 err = enetc_psfp_enable(priv);
1555 priv->active_offloads |= ENETC_F_QCI;
1559 err = enetc_psfp_disable(priv);
1563 priv->active_offloads &= ~ENETC_F_QCI;
1568 static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
1570 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1573 for (i = 0; i < priv->num_rx_rings; i++)
1574 enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
1577 static void enetc_enable_txvlan(struct net_device *ndev, bool en)
1579 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1582 for (i = 0; i < priv->num_tx_rings; i++)
1583 enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
1586 int enetc_set_features(struct net_device *ndev,
1587 netdev_features_t features)
1589 netdev_features_t changed = ndev->features ^ features;
1592 if (changed & NETIF_F_RXHASH)
1593 enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
1595 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
1596 enetc_enable_rxvlan(ndev,
1597 !!(features & NETIF_F_HW_VLAN_CTAG_RX));
1599 if (changed & NETIF_F_HW_VLAN_CTAG_TX)
1600 enetc_enable_txvlan(ndev,
1601 !!(features & NETIF_F_HW_VLAN_CTAG_TX));
1603 if (changed & NETIF_F_HW_TC)
1604 err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
1609 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
1610 static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
1612 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1613 struct hwtstamp_config config;
1616 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1619 switch (config.tx_type) {
1620 case HWTSTAMP_TX_OFF:
1621 priv->active_offloads &= ~ENETC_F_TX_TSTAMP;
1623 case HWTSTAMP_TX_ON:
1624 priv->active_offloads |= ENETC_F_TX_TSTAMP;
1630 ao = priv->active_offloads;
1631 switch (config.rx_filter) {
1632 case HWTSTAMP_FILTER_NONE:
1633 priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
1636 priv->active_offloads |= ENETC_F_RX_TSTAMP;
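/* Rx timestamping is all-or-nothing here, so any filter other than
 * NONE is upgraded to HWTSTAMP_FILTER_ALL
 */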
1637 config.rx_filter = HWTSTAMP_FILTER_ALL;
1640 if (netif_running(ndev) && ao != priv->active_offloads) {
1645 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
1646 -EFAULT : 0;
1649 static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
1651 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1652 struct hwtstamp_config config;
1656 if (priv->active_offloads & ENETC_F_TX_TSTAMP)
1657 config.tx_type = HWTSTAMP_TX_ON;
1659 config.tx_type = HWTSTAMP_TX_OFF;
1661 config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
1662 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
1664 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
1665 -EFAULT : 0;
1669 int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1671 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1672 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
1673 if (cmd == SIOCSHWTSTAMP)
1674 return enetc_hwtstamp_set(ndev, rq);
1675 if (cmd == SIOCGHWTSTAMP)
1676 return enetc_hwtstamp_get(ndev, rq);
1682 return phylink_mii_ioctl(priv->phylink, rq, cmd);
1685 int enetc_alloc_msix(struct enetc_ndev_priv *priv)
1687 struct pci_dev *pdev = priv->si->pdev;
1689 int i, n, err, nvec;
1691 nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
1692 /* allocate MSIX for both messaging and Rx/Tx interrupts */
1693 n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
1701 /* # of tx rings per int vector */
1702 v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
1704 for (i = 0; i < priv->bdr_int_num; i++) {
1705 struct enetc_int_vector *v;
1706 struct enetc_bdr *bdr;
1709 v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
1715 priv->int_vector[i] = v;
1717 /* init defaults for adaptive IC */
1718 if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
1720 v->rx_dim_en = true;
1722 INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
1723 netif_napi_add(priv->ndev, &v->napi, enetc_poll,
1724 NAPI_POLL_WEIGHT);
1725 v->count_tx_rings = v_tx_rings;
1727 for (j = 0; j < v_tx_rings; j++) {
1730 /* default tx ring mapping policy */
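/* with the maximum number of vectors (presumably two, per the "2 CPUs"
 * case below), rings interleave across vectors: vector 0 takes rings
 * 0,2,4,... and vector 1 takes 1,3,5,...; otherwise each vector gets a
 * contiguous block of v_tx_rings rings
 */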
1731 if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
1732 idx = 2 * j + i; /* 2 CPUs */
1733 else
1734 idx = j + i * v_tx_rings; /* default */
1736 __set_bit(idx, &v->tx_rings_map);
1737 bdr = &v->tx_ring[j];
1739 bdr->ndev = priv->ndev;
1740 bdr->dev = priv->dev;
1741 bdr->bd_count = priv->tx_bd_count;
1742 priv->tx_ring[idx] = bdr;
1747 bdr->ndev = priv->ndev;
1748 bdr->dev = priv->dev;
1749 bdr->bd_count = priv->rx_bd_count;
1750 priv->rx_ring[i] = bdr;
1757 netif_napi_del(&priv->int_vector[i]->napi);
1758 cancel_work_sync(&priv->int_vector[i]->rx_dim.work);
1759 kfree(priv->int_vector[i]);
1762 pci_free_irq_vectors(pdev);
1767 void enetc_free_msix(struct enetc_ndev_priv *priv)
1771 for (i = 0; i < priv->bdr_int_num; i++) {
1772 struct enetc_int_vector *v = priv->int_vector[i];
1774 netif_napi_del(&v->napi);
1775 cancel_work_sync(&v->rx_dim.work);
1778 for (i = 0; i < priv->num_rx_rings; i++)
1779 priv->rx_ring[i] = NULL;
1781 for (i = 0; i < priv->num_tx_rings; i++)
1782 priv->tx_ring[i] = NULL;
1784 for (i = 0; i < priv->bdr_int_num; i++) {
1785 kfree(priv->int_vector[i]);
1786 priv->int_vector[i] = NULL;
1789 /* disable all MSIX for this device */
1790 pci_free_irq_vectors(priv->si->pdev);
1793 static void enetc_kfree_si(struct enetc_si *si)
1795 char *p = (char *)si - si->pad;
1800 static void enetc_detect_errata(struct enetc_si *si)
1802 if (si->pdev->revision == ENETC_REV1)
1803 si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP;
1806 int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
1808 struct enetc_si *si, *p;
1809 struct enetc_hw *hw;
1814 err = pci_enable_device_mem(pdev);
1816 dev_err(&pdev->dev, "device enable failed\n");
1820 /* set up for high or low DMA */
1821 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1823 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1826 "DMA configuration failed: 0x%x\n", err);
1831 err = pci_request_mem_regions(pdev, name);
1833 dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
1834 goto err_pci_mem_reg;
1837 pci_set_master(pdev);
1839 alloc_size = sizeof(struct enetc_si);
1841 /* align priv to 32B */
1842 alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
1843 alloc_size += sizeof_priv;
1845 /* force 32B alignment for enetc_si */
1846 alloc_size += ENETC_SI_ALIGN - 1;
1848 p = kzalloc(alloc_size, GFP_KERNEL);
1854 si = PTR_ALIGN(p, ENETC_SI_ALIGN);
1855 si->pad = (char *)si - (char *)p;
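/* si->pad records the alignment offset so enetc_kfree_si() can recover
 * the original allocation pointer
 */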
1857 pci_set_drvdata(pdev, si);
1861 len = pci_resource_len(pdev, ENETC_BAR_REGS);
1862 hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
1865 dev_err(&pdev->dev, "ioremap() failed\n");
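/* the port and global register blocks sit above the SI registers in the
 * same BAR and are mapped only when the BAR is large enough to contain
 * them (e.g. on the PF), hence the length checks below
 */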
1868 if (len > ENETC_PORT_BASE)
1869 hw->port = hw->reg + ENETC_PORT_BASE;
1870 if (len > ENETC_GLOBAL_BASE)
1871 hw->global = hw->reg + ENETC_GLOBAL_BASE;
1873 enetc_detect_errata(si);
1880 pci_release_mem_regions(pdev);
1883 pci_disable_device(pdev);
1888 void enetc_pci_remove(struct pci_dev *pdev)
1890 struct enetc_si *si = pci_get_drvdata(pdev);
1891 struct enetc_hw *hw = &si->hw;
1895 pci_release_mem_regions(pdev);
1896 pci_disable_device(pdev);