Merge tag 'perf-core-2022-06-05' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1d69fe0..56b46b8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -56,6 +56,7 @@
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 #include <net/page_pool.h>
+#include <linux/align.h>
 
 #include "bnxt_hsi.h"
 #include "bnxt.h"
@@ -738,7 +739,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
                page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }
-       *mapping += bp->rx_dma_offset;
        return page;
 }
 
@@ -780,6 +780,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                if (!page)
                        return -ENOMEM;
 
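+               /* __bnxt_alloc_rx_page() now returns the raw page_pool
+                * mapping; apply the rx payload offset only here, so other
+                * callers (agg ring, XDP) keep the unadjusted address.
+                */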
+               mapping += bp->rx_dma_offset;
                rx_buf->data = page;
                rx_buf->data_ptr = page_address(page) + bp->rx_offset;
        } else {
@@ -840,33 +841,41 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
        u16 sw_prod = rxr->rx_sw_agg_prod;
        unsigned int offset = 0;
 
-       if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
-               page = rxr->rx_page;
-               if (!page) {
+       if (BNXT_RX_PAGE_MODE(bp)) {
+               page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+
+               if (!page)
+                       return -ENOMEM;
+
+       } else {
+               if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+                       page = rxr->rx_page;
+                       if (!page) {
+                               page = alloc_page(gfp);
+                               if (!page)
+                                       return -ENOMEM;
+                               rxr->rx_page = page;
+                               rxr->rx_page_offset = 0;
+                       }
+                       offset = rxr->rx_page_offset;
+                       rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+                       if (rxr->rx_page_offset == PAGE_SIZE)
+                               rxr->rx_page = NULL;
+                       else
+                               get_page(page);
+               } else {
                        page = alloc_page(gfp);
                        if (!page)
                                return -ENOMEM;
-                       rxr->rx_page = page;
-                       rxr->rx_page_offset = 0;
                }
-               offset = rxr->rx_page_offset;
-               rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
-               if (rxr->rx_page_offset == PAGE_SIZE)
-                       rxr->rx_page = NULL;
-               else
-                       get_page(page);
-       } else {
-               page = alloc_page(gfp);
-               if (!page)
-                       return -ENOMEM;
-       }
 
-       mapping = dma_map_page_attrs(&pdev->dev, page, offset,
-                                    BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
-                                    DMA_ATTR_WEAK_ORDERING);
-       if (dma_mapping_error(&pdev->dev, mapping)) {
-               __free_page(page);
-               return -EIO;
+               mapping = dma_map_page_attrs(&pdev->dev, page, offset,
+                                            BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
+                                            DMA_ATTR_WEAK_ORDERING);
+               if (dma_mapping_error(&pdev->dev, mapping)) {
+                       __free_page(page);
+                       return -EIO;
+               }
        }
 
        if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
@@ -962,6 +971,39 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
        rxr->rx_sw_agg_prod = sw_prod;
 }
 
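+/* Page-mode receive of a single-buffer packet: replenish the ring,
+ * unmap the page and build the skb directly on it, leaving the page
+ * under page_pool control via skb_mark_for_recycle().
+ */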
+static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
+                                             struct bnxt_rx_ring_info *rxr,
+                                             u16 cons, void *data, u8 *data_ptr,
+                                             dma_addr_t dma_addr,
+                                             unsigned int offset_and_len)
+{
+       unsigned int len = offset_and_len & 0xffff;
+       struct page *page = data;
+       u16 prod = rxr->rx_prod;
+       struct sk_buff *skb;
+       int err;
+
+       err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+       if (unlikely(err)) {
+               bnxt_reuse_rx_data(rxr, cons, data);
+               return NULL;
+       }
+       dma_addr -= bp->rx_dma_offset;
+       dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
+                            DMA_ATTR_WEAK_ORDERING);
+       skb = build_skb(page_address(page), BNXT_PAGE_MODE_BUF_SIZE +
+                                           bp->rx_dma_offset);
+       if (!skb) {
+               __free_page(page);
+               return NULL;
+       }
+       skb_mark_for_recycle(skb);
+       skb_reserve(skb, bp->rx_dma_offset);
+       __skb_put(skb, len);
+
+       return skb;
+}
+
 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                                        struct bnxt_rx_ring_info *rxr,
                                        u16 cons, void *data, u8 *data_ptr,
@@ -984,7 +1026,6 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
        dma_addr -= bp->rx_dma_offset;
        dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
                             DMA_ATTR_WEAK_ORDERING);
-       page_pool_release_page(rxr->page_pool, page);
 
        if (unlikely(!payload))
                payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -995,6 +1036,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                return NULL;
        }
 
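+       /* The page stays under page_pool control; recycle it when the skb
+        * is freed instead of releasing it from the pool up front.
+        */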
+       skb_mark_for_recycle(skb);
        off = (void *)data_ptr - page_address(page);
        skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
        memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
@@ -1038,22 +1080,24 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
        return skb;
 }
 
-static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
-                                    struct bnxt_cp_ring_info *cpr,
-                                    struct sk_buff *skb, u16 idx,
-                                    u32 agg_bufs, bool tpa)
+static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
+                              struct bnxt_cp_ring_info *cpr,
+                              struct skb_shared_info *shinfo,
+                              u16 idx, u32 agg_bufs, bool tpa,
+                              struct xdp_buff *xdp)
 {
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
+       u32 i, total_frag_len = 0;
        bool p5_tpa = false;
-       u32 i;
 
        if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
                p5_tpa = true;
 
        for (i = 0; i < agg_bufs; i++) {
+               skb_frag_t *frag = &shinfo->frags[i];
                u16 cons, frag_len;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf;
@@ -1069,8 +1113,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
 
                cons_rx_buf = &rxr->rx_agg_ring[cons];
-               skb_fill_page_desc(skb, i, cons_rx_buf->page,
-                                  cons_rx_buf->offset, frag_len);
+               skb_frag_off_set(frag, cons_rx_buf->offset);
+               skb_frag_size_set(frag, frag_len);
+               __skb_frag_set_page(frag, cons_rx_buf->page);
+               shinfo->nr_frags = i + 1;
                __clear_bit(cons, rxr->rx_agg_bmap);
 
                /* It is possible for bnxt_alloc_rx_page() to allocate
@@ -1081,16 +1127,14 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;
 
+               if (xdp && page_is_pfmemalloc(page))
+                       xdp_buff_set_frag_pfmemalloc(xdp);
+
                if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
-                       struct skb_shared_info *shinfo;
                        unsigned int nr_frags;
 
-                       shinfo = skb_shinfo(skb);
                        nr_frags = --shinfo->nr_frags;
                        __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
-
-                       dev_kfree_skb(skb);
-
                        cons_rx_buf->page = page;
 
                        /* Update prod since possibly some pages have been
@@ -1098,23 +1142,62 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
                         */
                        rxr->rx_agg_prod = prod;
                        bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
-                       return NULL;
+                       return 0;
                }
 
                dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
-                                    DMA_FROM_DEVICE,
+                                    bp->rx_dir,
                                     DMA_ATTR_WEAK_ORDERING);
 
-               skb->data_len += frag_len;
-               skb->len += frag_len;
-               skb->truesize += PAGE_SIZE;
-
+               total_frag_len += frag_len;
                prod = NEXT_RX_AGG(prod);
        }
        rxr->rx_agg_prod = prod;
+       return total_frag_len;
+}
+
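+/* skb flavor: attach the pages collected by __bnxt_rx_agg_pages() as
+ * frags and grow the skb accordingly; the skb is freed if the agg ring
+ * could not be replenished.
+ */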
+static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
+                                            struct bnxt_cp_ring_info *cpr,
+                                            struct sk_buff *skb, u16 idx,
+                                            u32 agg_bufs, bool tpa)
+{
+       struct skb_shared_info *shinfo = skb_shinfo(skb);
+       u32 total_frag_len = 0;
+
+       total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
+                                            agg_bufs, tpa, NULL);
+       if (!total_frag_len) {
+               dev_kfree_skb(skb);
+               return NULL;
+       }
+
+       skb->data_len += total_frag_len;
+       skb->len += total_frag_len;
+       skb->truesize += PAGE_SIZE * agg_bufs;
        return skb;
 }
 
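+/* XDP flavor: attach the aggregation pages to the xdp_buff's shared
+ * info and flag it as a multi-buffer frame.
+ */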
+static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
+                                struct bnxt_cp_ring_info *cpr,
+                                struct xdp_buff *xdp, u16 idx,
+                                u32 agg_bufs, bool tpa)
+{
+       struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
+       u32 total_frag_len = 0;
+
+       if (!xdp_buff_has_frags(xdp))
+               shinfo->nr_frags = 0;
+
+       total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
+                                            idx, agg_bufs, tpa, xdp);
+       if (total_frag_len) {
+               xdp_buff_set_frags_flag(xdp);
+               shinfo->nr_frags = agg_bufs;
+               shinfo->xdp_frags_size = total_frag_len;
+       }
+       return total_frag_len;
+}
+
 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                               u8 agg_bufs, u32 *raw_cons)
 {
@@ -1644,7 +1727,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
        }
 
        if (agg_bufs) {
-               skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
+               skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
                if (!skb) {
-                       /* Page reuse already handled by bnxt_rx_pages(). */
+                       /* Page reuse already handled by bnxt_rx_agg_pages_skb(). */
                        cpr->sw_stats.rx.rx_oom_discards += 1;
@@ -1729,8 +1812,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        struct bnxt_sw_rx_bd *rx_buf;
        unsigned int len;
        u8 *data_ptr, agg_bufs, cmp_type;
+       bool xdp_active = false;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
+       struct xdp_buff xdp;
        u32 flags, misc;
        void *data;
        int rc = 0;
@@ -1839,18 +1924,39 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        len = flags >> RX_CMP_LEN_SHIFT;
        dma_addr = rx_buf->mapping;
 
-       if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
-               rc = 1;
-               goto next_rx;
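+       /* With an XDP program attached, initialize the xdp_buff first and
+        * pull any aggregation buffers into it as frags so the program
+        * sees the complete multi-buffer frame.
+        */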
+       if (bnxt_xdp_attached(bp, rxr)) {
+               bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp);
+               if (agg_bufs) {
+                       u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
+                                                            cp_cons, agg_bufs,
+                                                            false);
+                       if (!frag_len) {
+                               cpr->sw_stats.rx.rx_oom_discards += 1;
+                               rc = -ENOMEM;
+                               goto next_rx;
+                       }
+               }
+               xdp_active = true;
+       }
+
+       if (xdp_active) {
+               if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) {
+                       rc = 1;
+                       goto next_rx;
+               }
        }
 
        if (len <= bp->rx_copy_thresh) {
                skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
                bnxt_reuse_rx_data(rxr, cons, data);
                if (!skb) {
-                       if (agg_bufs)
-                               bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
-                                                      agg_bufs, false);
+                       if (agg_bufs) {
+                               if (!xdp_active)
+                                       bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
+                                                              agg_bufs, false);
+                               else
+                                       bnxt_xdp_buff_frags_free(rxr, &xdp);
+                       }
                        cpr->sw_stats.rx.rx_oom_discards += 1;
                        rc = -ENOMEM;
                        goto next_rx;
@@ -1872,11 +1978,22 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        }
 
        if (agg_bufs) {
-               skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
-               if (!skb) {
-                       cpr->sw_stats.rx.rx_oom_discards += 1;
-                       rc = -ENOMEM;
-                       goto next_rx;
+               if (!xdp_active) {
+                       skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
+                       if (!skb) {
+                               cpr->sw_stats.rx.rx_oom_discards += 1;
+                               rc = -ENOMEM;
+                               goto next_rx;
+                       }
+               } else {
+                       skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+                       if (!skb) {
+                               /* we should be able to free the old skb here */
+                               bnxt_xdp_buff_frags_free(rxr, &xdp);
+                               cpr->sw_stats.rx.rx_oom_discards += 1;
+                               rc = -ENOMEM;
+                               goto next_rx;
+                       }
                }
        }
 
@@ -1923,7 +2040,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        }
 
        if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
-                    RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
+                    RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
                if (bp->flags & BNXT_FLAG_CHIP_P5) {
                        u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
                        u64 ns, ts;
@@ -2492,10 +2609,13 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
        if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
                struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
 
-               if (bnapi->events & BNXT_AGG_EVENT)
-                       bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
                bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
        }
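+       /* Ring the agg doorbell whenever agg buffers were replenished,
+        * independent of whether an rx completion was also processed.
+        */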
+       if (bnapi->events & BNXT_AGG_EVENT) {
+               struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+
+               bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+       }
        bnapi->events = 0;
 }
 
@@ -2876,14 +2996,23 @@ skip_rx_buf_free:
                if (!page)
                        continue;
 
-               dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-                                    BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
-                                    DMA_ATTR_WEAK_ORDERING);
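+               /* Page-mode agg buffers come from the page_pool and are
+                * recycled there; legacy buffers were alloc_page()'d and
+                * are freed directly.
+                */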
+               if (BNXT_RX_PAGE_MODE(bp)) {
+                       dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+                                            BNXT_RX_PAGE_SIZE, bp->rx_dir,
+                                            DMA_ATTR_WEAK_ORDERING);
+                       rx_agg_buf->page = NULL;
+                       __clear_bit(i, rxr->rx_agg_bmap);
 
-               rx_agg_buf->page = NULL;
-               __clear_bit(i, rxr->rx_agg_bmap);
+                       page_pool_recycle_direct(rxr->page_pool, page);
+               } else {
+                       dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+                                            BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
+                                            DMA_ATTR_WEAK_ORDERING);
+                       rx_agg_buf->page = NULL;
+                       __clear_bit(i, rxr->rx_agg_bmap);
 
-               __free_page(page);
+                       __free_page(page);
+               }
        }
 
 skip_rx_agg_free:
@@ -3797,7 +3926,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
        /* 8 for CRC and VLAN */
        rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
 
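+       /* Reserve enough headroom for either receive path: the larger of
+        * NET_SKB_PAD (skb) and XDP_PACKET_HEADROOM, 8-byte aligned.
+        */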
-       rx_space = rx_size + NET_SKB_PAD +
+       rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
        bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
@@ -3838,9 +3967,15 @@ void bnxt_set_ring_params(struct bnxt *bp)
                }
                bp->rx_agg_ring_size = agg_ring_size;
                bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
-               rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
-               rx_space = rx_size + NET_SKB_PAD +
-                       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+               if (BNXT_RX_PAGE_MODE(bp)) {
+                       rx_space = BNXT_PAGE_MODE_BUF_SIZE;
+                       rx_size = BNXT_MAX_PAGE_MODE_MTU;
+               } else {
+                       rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+                       rx_space = rx_size + NET_SKB_PAD +
+                               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+               }
        }
 
        bp->rx_buf_use_size = rx_size;
@@ -3881,14 +4016,21 @@ void bnxt_set_ring_params(struct bnxt *bp)
 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
 {
        if (page_mode) {
-               if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
-                       return -EOPNOTSUPP;
-               bp->dev->max_mtu =
-                       min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
                bp->flags &= ~BNXT_FLAG_AGG_RINGS;
-               bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
+               bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
+
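+               /* MTUs above BNXT_MAX_PAGE_MODE_MTU no longer fit a single
+                * page buffer, so enable jumbo (aggregation ring) placement
+                * and the multi-page skb handler.
+                */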
+               if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+                       bp->flags |= BNXT_FLAG_JUMBO;
+                       bp->rx_skb_func = bnxt_rx_multi_page_skb;
+                       bp->dev->max_mtu =
+                               min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
+               } else {
+                       bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+                       bp->rx_skb_func = bnxt_rx_page_skb;
+                       bp->dev->max_mtu =
+                               min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
+               }
                bp->rx_dir = DMA_BIDIRECTIONAL;
-               bp->rx_skb_func = bnxt_rx_page_skb;
                /* Disable LRO or GRO_HW */
                netdev_update_features(bp->dev);
        } else {
@@ -5230,12 +5372,15 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
        if (rc)
                return rc;
 
-       req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
-                                VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
-                                VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
-       req->enables =
-               cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
-                           VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+       req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
+       req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
+
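+       /* Header-data split is only requested in non-jumbo page mode;
+        * jumbo placement is requested unconditionally.
+        */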
+       if (BNXT_RX_PAGE_MODE(bp) && !BNXT_RX_JUMBO_MODE(bp)) {
+               req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+                                         VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+               req->enables |=
+                       cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+       }
        /* thresholds not implemented in firmware yet */
        req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
        req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
@@ -7514,7 +7659,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
        struct hwrm_func_qcaps_output *resp;
        struct hwrm_func_qcaps_input *req;
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-       u32 flags, flags_ext;
+       u32 flags, flags_ext, flags_ext2;
        int rc;
 
        rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
@@ -7559,6 +7704,10 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
        if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
                bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
 
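+       /* flags_ext2 carries newer capability bits, e.g. timestamping of
+        * all received packets.
+        */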
+       flags_ext2 = le32_to_cpu(resp->flags_ext2);
+       if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
+               bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
+
        bp->tx_push_thresh = 0;
        if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
            BNXT_FW_MAJ(bp) > 217)
@@ -10363,6 +10512,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
        if (BNXT_PF(bp))
                bnxt_vf_reps_open(bp);
        bnxt_ptp_init_rtc(bp, true);
+       bnxt_ptp_cfg_tstamp_filters(bp);
        return 0;
 
 open_err_irq:
@@ -11035,6 +11185,9 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
        if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
                features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
 
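+       /* LRO and GRO_HW are implemented via TPA and cannot be left
+        * enabled when TPA is off.
+        */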
+       if (!(bp->flags & BNXT_FLAG_TPA))
+               features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+
        if (!(features & NETIF_F_GRO))
                features &= ~NETIF_F_GRO_HW;