net: hns3: refactor hns3_nic_reuse_page()
authorHao Chen <chenhao288@hisilicon.com>
Mon, 29 Nov 2021 14:00:19 +0000 (22:00 +0800)
committerDavid S. Miller <davem@davemloft.net>
Mon, 29 Nov 2021 14:26:17 +0000 (14:26 +0000)
Split the rx copybreak handling out of hns3_nic_reuse_page() into a
separate function to improve code simplicity.

Signed-off-by: Hao Chen <chenhao288@hisilicon.com>
Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c

index 3eb2985..731cefb 100644 (file)
@@ -3546,6 +3546,38 @@ static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
        return page_count(cb->priv) == cb->pagecnt_bias;
 }
 
+/* hns3_handle_rx_copybreak() - copy a small RX fragment into a freshly
+ * allocated page frag instead of consuming the ring buffer page.
+ *
+ * Called for fragments no larger than ring->rx_copybreak.  The payload
+ * beyond the @pull_len bytes already pulled into the skb linear area is
+ * memcpy'd into a napi_alloc_frag() buffer, which is attached to @skb as
+ * fragment index @i; the original ring page is then marked reusable via
+ * desc_cb->reuse_flag so the RX ring keeps ownership of it.
+ *
+ * Return: 0 on success, or -ENOMEM if the frag allocation fails (the
+ * failure is counted in ring->stats.frag_alloc_err).
+ */
+static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
+                                   struct hns3_enet_ring *ring,
+                                   int pull_len,
+                                   struct hns3_desc_cb *desc_cb)
+{
+       struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+       u32 frag_offset = desc_cb->page_offset + pull_len;
+       int size = le16_to_cpu(desc->rx.size);
+       u32 frag_size = size - pull_len;
+       void *frag = napi_alloc_frag(frag_size);
+
+       if (unlikely(!frag)) {
+               /* Stats are updated under u64_stats sync to keep 64-bit
+                * counters consistent on 32-bit architectures.
+                */
+               u64_stats_update_begin(&ring->syncp);
+               ring->stats.frag_alloc_err++;
+               u64_stats_update_end(&ring->syncp);
+
+               /* Rate-limited: allocation failures can come in bursts. */
+               hns3_rl_err(ring_to_netdev(ring),
+                           "failed to allocate rx frag\n");
+               return -ENOMEM;
+       }
+
+       /* Payload copied out, so the original ring page can be reused. */
+       desc_cb->reuse_flag = 1;
+       memcpy(frag, desc_cb->buf + frag_offset, frag_size);
+       skb_add_rx_frag(skb, i, virt_to_page(frag),
+                       offset_in_page(frag), frag_size, frag_size);
+
+       u64_stats_update_begin(&ring->syncp);
+       ring->stats.frag_alloc++;
+       u64_stats_update_end(&ring->syncp);
+       return 0;
+}
+
 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
                                struct hns3_enet_ring *ring, int pull_len,
                                struct hns3_desc_cb *desc_cb)
@@ -3555,6 +3587,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
        int size = le16_to_cpu(desc->rx.size);
        u32 truesize = hns3_buf_size(ring);
        u32 frag_size = size - pull_len;
+       int ret = 0;
        bool reused;
 
        if (ring->page_pool) {
@@ -3589,27 +3622,9 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
                desc_cb->page_offset = 0;
                desc_cb->reuse_flag = 1;
        } else if (frag_size <= ring->rx_copybreak) {
-               void *frag = napi_alloc_frag(frag_size);
-
-               if (unlikely(!frag)) {
-                       u64_stats_update_begin(&ring->syncp);
-                       ring->stats.frag_alloc_err++;
-                       u64_stats_update_end(&ring->syncp);
-
-                       hns3_rl_err(ring_to_netdev(ring),
-                                   "failed to allocate rx frag\n");
+               ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb);
+               if (ret)
                        goto out;
-               }
-
-               desc_cb->reuse_flag = 1;
-               memcpy(frag, desc_cb->buf + frag_offset, frag_size);
-               skb_add_rx_frag(skb, i, virt_to_page(frag),
-                               offset_in_page(frag), frag_size, frag_size);
-
-               u64_stats_update_begin(&ring->syncp);
-               ring->stats.frag_alloc++;
-               u64_stats_update_end(&ring->syncp);
-               return;
        }
 
 out: