// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hnae3.h"

#define hclgevf_is_csq(ring) ((ring)->flag & HCLGEVF_TYPE_CSQ)
#define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? \
	DMA_TO_DEVICE : DMA_FROM_DEVICE)
#define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)

static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;
	int used;

	used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

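/* Worked example (illustrative numbers, not from the original source):
 * with desc_num = 1024, next_to_clean = 10 and next_to_use = 1020,
 * used = (1020 - 10 + 1024) % 1024 = 1010, leaving
 * 1024 - 1010 - 1 = 13 free slots. One slot is always kept unused so
 * that a full ring can be told apart from an empty one (head == tail).
 */
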
static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
					   int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

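/* Example of the wrapped case above (illustrative values only): with
 * desc_num = 1024, next_to_use = 5 and next_to_clean = 1000, the ring
 * has wrapped (ntu < ntc), so any hardware head in [1000, 1023] or
 * [0, 5] is consistent with the software view and passes the check.
 */
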
static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
	struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
	struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
	int clean;
	u32 head;

	head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touch any data */

	if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
			 csq->next_to_use, csq->next_to_clean);
		dev_warn(&hdev->pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
{
	u32 head;

	head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool hclgevf_is_special_opcode(u16 opcode)
{
	u16 spec_opcode[] = {0x30, 0x31, 0x32};
	int i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
		if (spec_opcode[i] == opcode)
			return true;
	}

	return false;
}

static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
{
	struct hclgevf_dev *hdev = ring->dev;
	struct hclgevf_hw *hw = &hdev->hw;
	u32 reg_val;

	if (ring->flag == HCLGEVF_TYPE_CSQ) {
		reg_val = (u32)ring->desc_dma_addr;
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);

		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);

		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
	} else {
		reg_val = (u32)ring->desc_dma_addr;
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);

		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);

		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
	}
}

static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
{
	hclgevf_cmd_config_regs(&hw->cmq.csq);
	hclgevf_cmd_config_regs(&hw->cmq.crq);
}

static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclgevf_desc);

	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
					&ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclgevf_desc);

	if (ring->desc) {
		dma_free_coherent(cmq_ring_to_dev(ring), size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}

static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
{
	struct hclgevf_hw *hw = &hdev->hw;
	struct hclgevf_cmq_ring *ring =
		(ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->dev = hdev;
	ring->flag = ring_type;

	/* allocate CSQ/CRQ descriptor */
	ret = hclgevf_alloc_cmd_desc(ring);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
			(ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");

	return ret;
}

void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
				  enum hclgevf_opcode_type opcode, bool is_read)
{
	memset(desc, 0, sizeof(struct hclgevf_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
				 HCLGEVF_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
}

static int hclgevf_cmd_convert_err_code(u16 desc_ret)
{
	switch (desc_ret) {
	case HCLGEVF_CMD_EXEC_SUCCESS:
		return 0;
	case HCLGEVF_CMD_NO_AUTH:
		return -EPERM;
	case HCLGEVF_CMD_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case HCLGEVF_CMD_QUEUE_FULL:
		return -EXFULL;
	case HCLGEVF_CMD_NEXT_ERR:
		return -ENOSR;
	case HCLGEVF_CMD_UNEXE_ERR:
		return -ENOTBLK;
	case HCLGEVF_CMD_PARA_ERR:
		return -EINVAL;
	case HCLGEVF_CMD_RESULT_ERR:
		return -ERANGE;
	case HCLGEVF_CMD_TIMEOUT:
		return -ETIME;
	case HCLGEVF_CMD_HILINK_ERR:
		return -ENOLINK;
	case HCLGEVF_CMD_QUEUE_ILLEGAL:
		return -ENXIO;
	case HCLGEVF_CMD_INVALID:
		return -EBADR;
	default:
		return -EIO;
	}
}

/* hclgevf_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue. It copies the
 * descriptors into the CSQ, rings the doorbell and, for synchronous
 * commands, polls for the firmware write-back and cleans the queue.
 */
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
{
	struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
	struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
	struct hclgevf_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	int status = 0;
	u16 retval;
	u16 opcode;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	if (num > hclgevf_ring_space(&hw->cmq.csq)) {
		/* If CMDQ ring is full, SW HEAD and HW HEAD may be different,
		 * need update the SW HEAD pointer csq->next_to_clean
		 */
		csq->next_to_clean = hclgevf_read_dev(hw,
						      HCLGEVF_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of desc in the ring for this time
	 * which will be used for hardware to write back
	 */
	ntc = hw->cmq.csq.next_to_use;
	opcode = le16_to_cpu(desc[0].opcode);
	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
			  hw->cmq.csq.next_to_use);

	/* If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are sent, use the first one to check
	 */
	if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
		do {
			if (hclgevf_cmd_csq_done(hw))
				break;
			udelay(1);
			timeout++;
		} while (timeout < hw->cmq.tx_timeout);
	}

	if (hclgevf_cmd_csq_done(hw)) {
		complete = true;
		handle = 0;

		while (handle < num) {
			/* Get the result of hardware write back */
			desc_to_use = &hw->cmq.csq.desc[ntc];
			desc[handle] = *desc_to_use;

			if (likely(!hclgevf_is_special_opcode(opcode)))
				retval = le16_to_cpu(desc[handle].retval);
			else
				retval = le16_to_cpu(desc[0].retval);

			status = hclgevf_cmd_convert_err_code(retval);
			hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
			ntc++;
			handle++;
			if (ntc == hw->cmq.csq.desc_num)
				ntc = 0;
		}
	}

	if (!complete)
		status = -EBADE;

	/* Clean the command send queue */
	handle = hclgevf_cmd_csq_clean(hw);
	if (handle != num)
		dev_warn(&hdev->pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return status;
}

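/* Typical caller pattern (a sketch; HCLGEVF_OPC_FOO and struct
 * hclgevf_foo_cmd are hypothetical placeholders, not opcodes defined in
 * hclgevf_cmd.h):
 *
 *	struct hclgevf_desc desc;
 *	struct hclgevf_foo_cmd *req;
 *	int ret;
 *
 *	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_FOO, true);
 *	req = (struct hclgevf_foo_cmd *)desc.data;
 *	ret = hclgevf_cmd_send(hw, &desc, 1);
 *	if (!ret)
 *		... parse the little-endian fields out of req ...
 *
 * hclgevf_cmd_query_firmware_version() below is a live instance of
 * this pattern.
 */
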
static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
					      u32 *version)
{
	struct hclgevf_query_version_cmd *resp;
	struct hclgevf_desc desc;
	int status;

	resp = (struct hclgevf_query_version_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, true);
	status = hclgevf_cmd_send(hw, &desc, 1);
	if (!status)
		*version = le32_to_cpu(resp->firmware);

	return status;
}

int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
{
	int ret;

	/* Setup the lock for command queue */
	spin_lock_init(&hdev->hw.cmq.csq.lock);
	spin_lock_init(&hdev->hw.cmq.crq.lock);

	hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
	hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
	hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;

	ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;
err_csq:
	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
	return ret;
}

int hclgevf_cmd_init(struct hclgevf_dev *hdev)
{
	u32 version;
	int ret;

	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);

	/* initialize the pointers of async rx queue of mailbox */
	hdev->arq.hdev = hdev;
	hdev->arq.head = 0;
	hdev->arq.tail = 0;
	atomic_set(&hdev->arq.count, 0);
	hdev->hw.cmq.csq.next_to_clean = 0;
	hdev->hw.cmq.csq.next_to_use = 0;
	hdev->hw.cmq.crq.next_to_clean = 0;
	hdev->hw.cmq.crq.next_to_use = 0;

	hclgevf_cmd_init_regs(&hdev->hw);

	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
	clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

	/* Check if there is new reset pending, because the higher level
	 * reset may happen when lower level reset is being processed.
	 */
	if (hclgevf_is_reset_pending(hdev)) {
		ret = -EBUSY;
		goto err_cmd_init;
	}

	/* get firmware version */
	ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to query firmware version\n", ret);
		goto err_cmd_init;
	}
	hdev->fw_version = version;
	dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);

	return 0;

err_cmd_init:
	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	return ret;
}

static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
{
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
}

void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);
	clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	hclgevf_cmd_uninit_regs(&hdev->hw);
	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
}
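
/* Usage note (a summary of the expected lifecycle, as these entry
 * points are wired up from hclgevf_main.c): hclgevf_cmd_queue_init()
 * allocates the CSQ/CRQ descriptor rings once at probe time,
 * hclgevf_cmd_init() programs the hardware and may run again after a
 * reset, and hclgevf_cmd_uninit() quiesces the hardware and frees the
 * descriptor memory on removal.
 */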