net: hns3: use atomic_t to replace u32 for arq's count
author	Huazhong Tan <tanhuazhong@huawei.com>
Thu, 25 Apr 2019 12:42:49 +0000 (20:42 +0800)
committer	David S. Miller <davem@davemloft.net>
Fri, 26 Apr 2019 16:13:28 +0000 (12:13 -0400)
Since both the IRQ handler and the mailbox task update arq's count,
it should be an atomic_t instead of a plain u32; otherwise concurrent
updates may eventually leave its value incorrect.

Fixes: 07a0556a3a73 ("net: hns3: Changes to support ARQ(Asynchronous Receive Queue)")
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: Peng Li <lipeng321@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
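
The pattern the patch moves to looks roughly like the sketch below. It is
illustrative only (struct and function names such as arq_ring_example are
hypothetical, not the driver's actual code): a counter shared between
hard-IRQ context (producer) and task context (consumer) is kept consistent
with the kernel's atomic_t API instead of a plain u32.

/*
 * Minimal sketch: an ARQ-like ring whose element count is updated from
 * both the interrupt path (enqueue) and a deferred task (dequeue).
 * atomic_t keeps the count consistent without taking an extra lock.
 */
#include <linux/atomic.h>
#include <linux/types.h>

#define ARQ_EXAMPLE_MAX_MSG_NUM	1024	/* illustrative ring size */

struct arq_ring_example {
	u32 head;
	u32 tail;
	atomic_t count;		/* updated from IRQ and task context */
};

static void arq_example_init(struct arq_ring_example *arq)
{
	arq->head = 0;
	arq->tail = 0;
	atomic_set(&arq->count, 0);
}

/* Called from the interrupt path after receiving an async message. */
static bool arq_example_produce(struct arq_ring_example *arq)
{
	if (atomic_read(&arq->count) >= ARQ_EXAMPLE_MAX_MSG_NUM)
		return false;	/* ring full, drop the message */

	arq->tail = (arq->tail + 1) % ARQ_EXAMPLE_MAX_MSG_NUM;
	atomic_inc(&arq->count);
	return true;
}

/* Called from the deferred mailbox task after handling a message. */
static void arq_example_consume(struct arq_ring_example *arq)
{
	arq->head = (arq->head + 1) % ARQ_EXAMPLE_MAX_MSG_NUM;
	atomic_dec(&arq->count);
}

Note that in this single-producer/single-consumer arrangement each index
(head, tail) is only ever written from one context; only the shared count
needs the atomic accessors.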
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c

index 360463a..8b6191f 100644
@@ -111,7 +111,7 @@ struct hclgevf_mbx_arq_ring {
        struct hclgevf_dev *hdev;
        u32 head;
        u32 tail;
-       u32 count;
+       atomic_t count;
        u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
 };
 
index 1b428d4..71f356f 100644
@@ -340,7 +340,7 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
        hdev->arq.hdev = hdev;
        hdev->arq.head = 0;
        hdev->arq.tail = 0;
-       hdev->arq.count = 0;
+       atomic_set(&hdev->arq.count, 0);
        hdev->hw.cmq.csq.next_to_clean = 0;
        hdev->hw.cmq.csq.next_to_use = 0;
        hdev->hw.cmq.crq.next_to_clean = 0;
index eb56287..3c22639 100644
@@ -212,7 +212,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
                        /* we will drop the async msg if we find ARQ as full
                         * and continue with next message
                         */
-                       if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) {
+                       if (atomic_read(&hdev->arq.count) >=
+                           HCLGE_MBX_MAX_ARQ_MSG_NUM) {
                                dev_warn(&hdev->pdev->dev,
                                         "Async Q full, dropping msg(%d)\n",
                                         req->msg[1]);
@@ -224,7 +225,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
                        memcpy(&msg_q[0], req->msg,
                               HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
                        hclge_mbx_tail_ptr_move_arq(hdev->arq);
-                       hdev->arq.count++;
+                       atomic_inc(&hdev->arq.count);
 
                        hclgevf_mbx_task_schedule(hdev);
 
@@ -317,7 +318,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
                }
 
                hclge_mbx_head_ptr_move_arq(hdev->arq);
-               hdev->arq.count--;
+               atomic_dec(&hdev->arq.count);
                msg_q = hdev->arq.msg_q[hdev->arq.head];
        }
 }