1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
3 #include <asm/page.h>
4 #include <linux/acpi.h>
5 #include <linux/aer.h>
6 #include <linux/bitmap.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/idr.h>
9 #include <linux/io.h>
10 #include <linux/irqreturn.h>
11 #include <linux/log2.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/seq_file.h>
14 #include <linux/slab.h>
15 #include <linux/uacce.h>
16 #include <linux/uaccess.h>
17 #include <uapi/misc/uacce/hisi_qm.h>
18 #include "qm.h"
19
20 /* eq/aeq irq enable */
21 #define QM_VF_AEQ_INT_SOURCE            0x0
22 #define QM_VF_AEQ_INT_MASK              0x4
23 #define QM_VF_EQ_INT_SOURCE             0x8
24 #define QM_VF_EQ_INT_MASK               0xc
25 #define QM_IRQ_NUM_V1                   1
26 #define QM_IRQ_NUM_PF_V2                4
27 #define QM_IRQ_NUM_VF_V2                2
28 #define QM_IRQ_NUM_VF_V3                3
29
30 #define QM_EQ_EVENT_IRQ_VECTOR          0
31 #define QM_AEQ_EVENT_IRQ_VECTOR         1
32 #define QM_CMD_EVENT_IRQ_VECTOR         2
33 #define QM_ABNORMAL_EVENT_IRQ_VECTOR    3
34
35 /* mailbox */
36 #define QM_MB_CMD_SQC                   0x0
37 #define QM_MB_CMD_CQC                   0x1
38 #define QM_MB_CMD_EQC                   0x2
39 #define QM_MB_CMD_AEQC                  0x3
40 #define QM_MB_CMD_SQC_BT                0x4
41 #define QM_MB_CMD_CQC_BT                0x5
42 #define QM_MB_CMD_SQC_VFT_V2            0x6
43 #define QM_MB_CMD_STOP_QP               0x8
44 #define QM_MB_CMD_SRC                   0xc
45 #define QM_MB_CMD_DST                   0xd
46
47 #define QM_MB_CMD_SEND_BASE             0x300
48 #define QM_MB_EVENT_SHIFT               8
49 #define QM_MB_BUSY_SHIFT                13
50 #define QM_MB_OP_SHIFT                  14
51 #define QM_MB_CMD_DATA_ADDR_L           0x304
52 #define QM_MB_CMD_DATA_ADDR_H           0x308
53 #define QM_MB_PING_ALL_VFS              0xffff
54 #define QM_MB_CMD_DATA_SHIFT            32
55 #define QM_MB_CMD_DATA_MASK             GENMASK(31, 0)
56
57 /* sqc shift */
58 #define QM_SQ_HOP_NUM_SHIFT             0
59 #define QM_SQ_PAGE_SIZE_SHIFT           4
60 #define QM_SQ_BUF_SIZE_SHIFT            8
61 #define QM_SQ_SQE_SIZE_SHIFT            12
62 #define QM_SQ_PRIORITY_SHIFT            0
63 #define QM_SQ_ORDERS_SHIFT              4
64 #define QM_SQ_TYPE_SHIFT                8
65 #define QM_QC_PASID_ENABLE              0x1
66 #define QM_QC_PASID_ENABLE_SHIFT        7
67
68 #define QM_SQ_TYPE_MASK                 GENMASK(3, 0)
69 #define QM_SQ_TAIL_IDX(sqc)             ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
70
71 /* cqc shift */
72 #define QM_CQ_HOP_NUM_SHIFT             0
73 #define QM_CQ_PAGE_SIZE_SHIFT           4
74 #define QM_CQ_BUF_SIZE_SHIFT            8
75 #define QM_CQ_CQE_SIZE_SHIFT            12
76 #define QM_CQ_PHASE_SHIFT               0
77 #define QM_CQ_FLAG_SHIFT                1
78
79 #define QM_CQE_PHASE(cqe)               (le16_to_cpu((cqe)->w7) & 0x1)
80 #define QM_QC_CQE_SIZE                  4
81 #define QM_CQ_TAIL_IDX(cqc)             ((le16_to_cpu((cqc)->w11) >> 6) & 0x1)
82
83 /* eqc shift */
84 #define QM_EQE_AEQE_SIZE                (2UL << 12)
85 #define QM_EQC_PHASE_SHIFT              16
86
87 #define QM_EQE_PHASE(eqe)               ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
88 #define QM_EQE_CQN_MASK                 GENMASK(15, 0)
89
90 #define QM_AEQE_PHASE(aeqe)             ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
91 #define QM_AEQE_TYPE_SHIFT              17
92
93 #define QM_DOORBELL_CMD_SQ              0
94 #define QM_DOORBELL_CMD_CQ              1
95 #define QM_DOORBELL_CMD_EQ              2
96 #define QM_DOORBELL_CMD_AEQ             3
97
98 #define QM_DOORBELL_BASE_V1             0x340
99 #define QM_DB_CMD_SHIFT_V1              16
100 #define QM_DB_INDEX_SHIFT_V1            32
101 #define QM_DB_PRIORITY_SHIFT_V1         48
102 #define QM_DOORBELL_SQ_CQ_BASE_V2       0x1000
103 #define QM_DOORBELL_EQ_AEQ_BASE_V2      0x2000
104 #define QM_QUE_ISO_CFG_V                0x0030
105 #define QM_PAGE_SIZE                    0x0034
106 #define QM_QUE_ISO_EN                   0x100154
107 #define QM_CAPBILITY                    0x100158
108 #define QM_QP_NUN_MASK                  GENMASK(10, 0)
109 #define QM_QP_DB_INTERVAL               0x10000
110 #define QM_QP_MAX_NUM_SHIFT             11
111 #define QM_DB_CMD_SHIFT_V2              12
112 #define QM_DB_RAND_SHIFT_V2             16
113 #define QM_DB_INDEX_SHIFT_V2            32
114 #define QM_DB_PRIORITY_SHIFT_V2         48
115
116 #define QM_MEM_START_INIT               0x100040
117 #define QM_MEM_INIT_DONE                0x100044
118 #define QM_VFT_CFG_RDY                  0x10006c
119 #define QM_VFT_CFG_OP_WR                0x100058
120 #define QM_VFT_CFG_TYPE                 0x10005c
121 #define QM_SQC_VFT                      0x0
122 #define QM_CQC_VFT                      0x1
123 #define QM_VFT_CFG                      0x100060
124 #define QM_VFT_CFG_OP_ENABLE            0x100054
125
126 #define QM_VFT_CFG_DATA_L               0x100064
127 #define QM_VFT_CFG_DATA_H               0x100068
128 #define QM_SQC_VFT_BUF_SIZE             (7ULL << 8)
129 #define QM_SQC_VFT_SQC_SIZE             (5ULL << 12)
130 #define QM_SQC_VFT_INDEX_NUMBER         (1ULL << 16)
131 #define QM_SQC_VFT_START_SQN_SHIFT      28
132 #define QM_SQC_VFT_VALID                (1ULL << 44)
133 #define QM_SQC_VFT_SQN_SHIFT            45
134 #define QM_CQC_VFT_BUF_SIZE             (7ULL << 8)
135 #define QM_CQC_VFT_SQC_SIZE             (5ULL << 12)
136 #define QM_CQC_VFT_INDEX_NUMBER         (1ULL << 16)
137 #define QM_CQC_VFT_VALID                (1ULL << 28)
138
139 #define QM_SQC_VFT_BASE_SHIFT_V2        28
140 #define QM_SQC_VFT_BASE_MASK_V2         GENMASK(15, 0)
141 #define QM_SQC_VFT_NUM_SHIFT_V2         45
142 #define QM_SQC_VFT_NUM_MASK_v2          GENMASK(9, 0)
143
144 #define QM_DFX_CNT_CLR_CE               0x100118
145
146 #define QM_ABNORMAL_INT_SOURCE          0x100000
147 #define QM_ABNORMAL_INT_SOURCE_CLR      GENMASK(14, 0)
148 #define QM_ABNORMAL_INT_MASK            0x100004
149 #define QM_ABNORMAL_INT_MASK_VALUE      0x7fff
150 #define QM_ABNORMAL_INT_STATUS          0x100008
151 #define QM_ABNORMAL_INT_SET             0x10000c
152 #define QM_ABNORMAL_INF00               0x100010
153 #define QM_FIFO_OVERFLOW_TYPE           0xc0
154 #define QM_FIFO_OVERFLOW_TYPE_SHIFT     6
155 #define QM_FIFO_OVERFLOW_VF             0x3f
156 #define QM_ABNORMAL_INF01               0x100014
157 #define QM_DB_TIMEOUT_TYPE              0xc0
158 #define QM_DB_TIMEOUT_TYPE_SHIFT        6
159 #define QM_DB_TIMEOUT_VF                0x3f
160 #define QM_RAS_CE_ENABLE                0x1000ec
161 #define QM_RAS_FE_ENABLE                0x1000f0
162 #define QM_RAS_NFE_ENABLE               0x1000f4
163 #define QM_RAS_CE_THRESHOLD             0x1000f8
164 #define QM_RAS_CE_TIMES_PER_IRQ         1
165 #define QM_RAS_MSI_INT_SEL              0x1040f4
166 #define QM_OOO_SHUTDOWN_SEL             0x1040f8
167
168 #define QM_RESET_WAIT_TIMEOUT           400
169 #define QM_PEH_VENDOR_ID                0x1000d8
170 #define ACC_VENDOR_ID_VALUE             0x5a5a
171 #define QM_PEH_DFX_INFO0                0x1000fc
172 #define QM_PEH_DFX_INFO1                0x100100
173 #define QM_PEH_DFX_MASK                 (BIT(0) | BIT(2))
174 #define QM_PEH_MSI_FINISH_MASK          GENMASK(19, 16)
175 #define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3
176 #define ACC_PEH_MSI_DISABLE             GENMASK(31, 0)
177 #define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
178 #define ACC_MASTER_TRANS_RETURN_RW      3
179 #define ACC_MASTER_TRANS_RETURN         0x300150
180 #define ACC_MASTER_GLOBAL_CTRL          0x300000
181 #define ACC_AM_CFG_PORT_WR_EN           0x30001c
182 #define QM_RAS_NFE_MBIT_DISABLE         ~QM_ECC_MBIT
183 #define ACC_AM_ROB_ECC_INT_STS          0x300104
184 #define ACC_ROB_ECC_ERR_MULTPL          BIT(1)
185 #define QM_MSI_CAP_ENABLE               BIT(16)
186
187 /* interfunction communication */
188 #define QM_IFC_READY_STATUS             0x100128
189 #define QM_IFC_C_STS_M                  0x10012C
190 #define QM_IFC_INT_SET_P                0x100130
191 #define QM_IFC_INT_CFG                  0x100134
192 #define QM_IFC_INT_SOURCE_P             0x100138
193 #define QM_IFC_INT_SOURCE_V             0x0020
194 #define QM_IFC_INT_MASK                 0x0024
195 #define QM_IFC_INT_STATUS               0x0028
196 #define QM_IFC_INT_SET_V                0x002C
197 #define QM_IFC_SEND_ALL_VFS             GENMASK(6, 0)
198 #define QM_IFC_INT_SOURCE_CLR           GENMASK(63, 0)
199 #define QM_IFC_INT_SOURCE_MASK          BIT(0)
200 #define QM_IFC_INT_DISABLE              BIT(0)
201 #define QM_IFC_INT_STATUS_MASK          BIT(0)
202 #define QM_IFC_INT_SET_MASK             BIT(0)
203 #define QM_WAIT_DST_ACK                 10
204 #define QM_MAX_PF_WAIT_COUNT            10
205 #define QM_MAX_VF_WAIT_COUNT            40
206 #define QM_VF_RESET_WAIT_US            20000
207 #define QM_VF_RESET_WAIT_CNT           3000
208 #define QM_VF_RESET_WAIT_TIMEOUT_US    \
209         (QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
210
211 #define QM_DFX_MB_CNT_VF                0x104010
212 #define QM_DFX_DB_CNT_VF                0x104020
213 #define QM_DFX_SQE_CNT_VF_SQN           0x104030
214 #define QM_DFX_CQE_CNT_VF_CQN           0x104040
215 #define QM_DFX_QN_SHIFT                 16
216 #define CURRENT_FUN_MASK                GENMASK(5, 0)
217 #define CURRENT_Q_MASK                  GENMASK(31, 16)
218
219 #define POLL_PERIOD                     10
220 #define POLL_TIMEOUT                    1000
221 #define WAIT_PERIOD_US_MAX              200
222 #define WAIT_PERIOD_US_MIN              100
223 #define MAX_WAIT_COUNTS                 1000
224 #define QM_CACHE_WB_START               0x204
225 #define QM_CACHE_WB_DONE                0x208
226
227 #define PCI_BAR_2                       2
228 #define PCI_BAR_4                       4
229 #define QM_SQE_DATA_ALIGN_MASK          GENMASK(6, 0)
230 #define QMC_ALIGN(sz)                   ALIGN(sz, 32)
231
232 #define QM_DBG_READ_LEN                 256
233 #define QM_DBG_WRITE_LEN                1024
234 #define QM_DBG_TMP_BUF_LEN              22
235 #define QM_PCI_COMMAND_INVALID          ~0
236 #define QM_RESET_STOP_TX_OFFSET         1
237 #define QM_RESET_STOP_RX_OFFSET         2
238
239 #define WAIT_PERIOD                     20
240 #define REMOVE_WAIT_DELAY               10
241 #define QM_SQE_ADDR_MASK                GENMASK(7, 0)
242 #define QM_EQ_DEPTH                     (1024 * 2)
243
244 #define QM_DRIVER_REMOVING              0
245 #define QM_RST_SCHED                    1
246 #define QM_RESETTING                    2
247 #define QM_QOS_PARAM_NUM                2
248 #define QM_QOS_VAL_NUM                  1
249 #define QM_QOS_BDF_PARAM_NUM            4
250 #define QM_QOS_MAX_VAL                  1000
251 #define QM_QOS_RATE                     100
252 #define QM_QOS_EXPAND_RATE              1000
253 #define QM_SHAPER_CIR_B_MASK            GENMASK(7, 0)
254 #define QM_SHAPER_CIR_U_MASK            GENMASK(10, 8)
255 #define QM_SHAPER_CIR_S_MASK            GENMASK(14, 11)
256 #define QM_SHAPER_FACTOR_CIR_U_SHIFT    8
257 #define QM_SHAPER_FACTOR_CIR_S_SHIFT    11
258 #define QM_SHAPER_FACTOR_CBS_B_SHIFT    15
259 #define QM_SHAPER_FACTOR_CBS_S_SHIFT    19
260 #define QM_SHAPER_CBS_B                 1
261 #define QM_SHAPER_CBS_S                 16
262 #define QM_SHAPER_VFT_OFFSET            6
263 #define WAIT_FOR_QOS_VF                 100
264 #define QM_QOS_MIN_ERROR_RATE           5
265 #define QM_QOS_TYPICAL_NUM              8
266 #define QM_SHAPER_MIN_CBS_S             8
267 #define QM_QOS_TICK                     0x300U
268 #define QM_QOS_DIVISOR_CLK              0x1f40U
269 #define QM_QOS_MAX_CIR_B                200
270 #define QM_QOS_MIN_CIR_B                100
271 #define QM_QOS_MAX_CIR_U                6
272 #define QM_QOS_MAX_CIR_S                11
273 #define QM_QOS_VAL_MAX_LEN              32
274
275 #define QM_AUTOSUSPEND_DELAY            3000
276
277 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
278         (((hop_num) << QM_CQ_HOP_NUM_SHIFT)     | \
279         ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT)      | \
280         ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT)      | \
281         ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
282
283 #define QM_MK_CQC_DW3_V2(cqe_sz) \
284         ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
285
286 #define QM_MK_SQC_W13(priority, orders, alg_type) \
287         (((priority) << QM_SQ_PRIORITY_SHIFT)   | \
288         ((orders) << QM_SQ_ORDERS_SHIFT)        | \
289         (((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))
290
291 #define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
292         (((hop_num) << QM_SQ_HOP_NUM_SHIFT)     | \
293         ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT)      | \
294         ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT)      | \
295         ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
296
297 #define QM_MK_SQC_DW3_V2(sqe_sz) \
298         ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
299
300 #define INIT_QC_COMMON(qc, base, pasid) do {                    \
301         (qc)->head = 0;                                         \
302         (qc)->tail = 0;                                         \
303         (qc)->base_l = cpu_to_le32(lower_32_bits(base));        \
304         (qc)->base_h = cpu_to_le32(upper_32_bits(base));        \
305         (qc)->dw3 = 0;                                          \
306         (qc)->w8 = 0;                                           \
307         (qc)->rsvd0 = 0;                                        \
308         (qc)->pasid = cpu_to_le16(pasid);                       \
309         (qc)->w11 = 0;                                          \
310         (qc)->rsvd1 = 0;                                        \
311 } while (0)
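/*
 * Illustrative sketch only (not code from this file): how the helpers
 * above would compose an SQC before it is handed to hardware through the
 * mailbox. Field values here are assumptions for the example:
 *
 *	struct qm_sqc sqc;
 *
 *	INIT_QC_COMMON(&sqc, sqe_dma, pasid);
 *	if (qm->ver == QM_HW_V1) {
 *		sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
 *		sqc.w8 = cpu_to_le16(QM_Q_DEPTH - 1);
 *	} else {
 *		sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
 *		sqc.w8 = 0;
 *	}
 *	sqc.w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, alg_type));
 */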
312
313 enum vft_type {
314         SQC_VFT = 0,
315         CQC_VFT,
316         SHAPER_VFT,
317 };
318
319 enum acc_err_result {
320         ACC_ERR_NONE,
321         ACC_ERR_NEED_RESET,
322         ACC_ERR_RECOVERED,
323 };
324
325 enum qm_alg_type {
326         ALG_TYPE_0,
327         ALG_TYPE_1,
328 };
329
330 enum qm_mb_cmd {
331         QM_PF_FLR_PREPARE = 0x01,
332         QM_PF_SRST_PREPARE,
333         QM_PF_RESET_DONE,
334         QM_VF_PREPARE_DONE,
335         QM_VF_PREPARE_FAIL,
336         QM_VF_START_DONE,
337         QM_VF_START_FAIL,
338         QM_PF_SET_QOS,
339         QM_VF_GET_QOS,
340 };
341
342 struct qm_cqe {
343         __le32 rsvd0;
344         __le16 cmd_id;
345         __le16 rsvd1;
346         __le16 sq_head;
347         __le16 sq_num;
348         __le16 rsvd2;
349         __le16 w7;
350 };
351
352 struct qm_eqe {
353         __le32 dw0;
354 };
355
356 struct qm_aeqe {
357         __le32 dw0;
358 };
359
360 struct qm_sqc {
361         __le16 head;
362         __le16 tail;
363         __le32 base_l;
364         __le32 base_h;
365         __le32 dw3;
366         __le16 w8;
367         __le16 rsvd0;
368         __le16 pasid;
369         __le16 w11;
370         __le16 cq_num;
371         __le16 w13;
372         __le32 rsvd1;
373 };
374
375 struct qm_cqc {
376         __le16 head;
377         __le16 tail;
378         __le32 base_l;
379         __le32 base_h;
380         __le32 dw3;
381         __le16 w8;
382         __le16 rsvd0;
383         __le16 pasid;
384         __le16 w11;
385         __le32 dw6;
386         __le32 rsvd1;
387 };
388
389 struct qm_eqc {
390         __le16 head;
391         __le16 tail;
392         __le32 base_l;
393         __le32 base_h;
394         __le32 dw3;
395         __le32 rsvd[2];
396         __le32 dw6;
397 };
398
399 struct qm_aeqc {
400         __le16 head;
401         __le16 tail;
402         __le32 base_l;
403         __le32 base_h;
404         __le32 dw3;
405         __le32 rsvd[2];
406         __le32 dw6;
407 };
408
409 struct qm_mailbox {
410         __le16 w0;
411         __le16 queue_num;
412         __le32 base_l;
413         __le32 base_h;
414         __le32 rsvd;
415 };
416
417 struct qm_doorbell {
418         __le16 queue_num;
419         __le16 cmd;
420         __le16 index;
421         __le16 priority;
422 };
423
424 struct hisi_qm_resource {
425         struct hisi_qm *qm;
426         int distance;
427         struct list_head list;
428 };
429
430 struct hisi_qm_hw_ops {
431         int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
432         void (*qm_db)(struct hisi_qm *qm, u16 qn,
433                       u8 cmd, u16 index, u8 priority);
434         u32 (*get_irq_num)(struct hisi_qm *qm);
435         int (*debug_init)(struct hisi_qm *qm);
436         void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
437         void (*hw_error_uninit)(struct hisi_qm *qm);
438         enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
439         int (*stop_qp)(struct hisi_qp *qp);
440         int (*set_msi)(struct hisi_qm *qm, bool set);
441         int (*ping_all_vfs)(struct hisi_qm *qm, u64 cmd);
442         int (*ping_pf)(struct hisi_qm *qm, u64 cmd);
443 };
444
445 struct qm_dfx_item {
446         const char *name;
447         u32 offset;
448 };
449
450 static struct qm_dfx_item qm_dfx_files[] = {
451         {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
452         {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
453         {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
454         {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
455         {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
456 };
457
458 static const char * const qm_debug_file_name[] = {
459         [CURRENT_QM]   = "current_qm",
460         [CURRENT_Q]    = "current_q",
461         [CLEAR_ENABLE] = "clear_enable",
462 };
463
464 struct hisi_qm_hw_error {
465         u32 int_msk;
466         const char *msg;
467 };
468
469 static const struct hisi_qm_hw_error qm_hw_error[] = {
470         { .int_msk = BIT(0), .msg = "qm_axi_rresp" },
471         { .int_msk = BIT(1), .msg = "qm_axi_bresp" },
472         { .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
473         { .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
474         { .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
475         { .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
476         { .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
477         { .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
478         { .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
479         { .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
480         { .int_msk = BIT(10), .msg = "qm_db_timeout" },
481         { .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
482         { .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
483         { .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
484         { .int_msk = BIT(14), .msg = "qm_flr_timeout" },
485         { /* sentinel */ }
486 };
487
488 static const char * const qm_db_timeout[] = {
489         "sq", "cq", "eq", "aeq",
490 };
491
492 static const char * const qm_fifo_overflow[] = {
493         "cq", "eq", "aeq",
494 };
495
496 static const char * const qm_s[] = {
497         "init", "start", "close", "stop",
498 };
499
500 static const char * const qp_s[] = {
501         "none", "init", "start", "stop", "close",
502 };
503
504 static const u32 typical_qos_val[QM_QOS_TYPICAL_NUM] = {100, 250, 500, 1000,
505                                                 10000, 25000, 50000, 100000};
506 static const u32 typical_qos_cbs_s[QM_QOS_TYPICAL_NUM] = {9, 10, 11, 12, 16,
507                                                          17, 18, 19};
508
509 static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
510 {
511         enum qm_state curr = atomic_read(&qm->status.flags);
512         bool avail = false;
513
514         switch (curr) {
515         case QM_INIT:
516                 if (new == QM_START || new == QM_CLOSE)
517                         avail = true;
518                 break;
519         case QM_START:
520                 if (new == QM_STOP)
521                         avail = true;
522                 break;
523         case QM_STOP:
524                 if (new == QM_CLOSE || new == QM_START)
525                         avail = true;
526                 break;
527         default:
528                 break;
529         }
530
531         dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
532                 qm_s[curr], qm_s[new]);
533
534         if (!avail)
535                 dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
536                          qm_s[curr], qm_s[new]);
537
538         return avail;
539 }
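/*
 * For reference, the legal QM state transitions accepted above are:
 *   QM_INIT  -> QM_START or QM_CLOSE
 *   QM_START -> QM_STOP
 *   QM_STOP  -> QM_START or QM_CLOSE
 * Everything else is rejected with a warning.
 */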
540
541 static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
542                               enum qp_state new)
543 {
544         enum qm_state qm_curr = atomic_read(&qm->status.flags);
545         enum qp_state qp_curr = 0;
546         bool avail = false;
547
548         if (qp)
549                 qp_curr = atomic_read(&qp->qp_status.flags);
550
551         switch (new) {
552         case QP_INIT:
553                 if (qm_curr == QM_START || qm_curr == QM_INIT)
554                         avail = true;
555                 break;
556         case QP_START:
557                 if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
558                     (qm_curr == QM_START && qp_curr == QP_STOP))
559                         avail = true;
560                 break;
561         case QP_STOP:
562                 if ((qm_curr == QM_START && qp_curr == QP_START) ||
563                     (qp_curr == QP_INIT))
564                         avail = true;
565                 break;
566         case QP_CLOSE:
567                 if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
568                     (qm_curr == QM_START && qp_curr == QP_STOP) ||
569                     (qm_curr == QM_STOP && qp_curr == QP_STOP)  ||
570                     (qm_curr == QM_STOP && qp_curr == QP_INIT))
571                         avail = true;
572                 break;
573         default:
574                 break;
575         }
576
577         dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
578                 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
579
580         if (!avail)
581                 dev_warn(&qm->pdev->dev,
582                          "Can not change qp state from %s to %s in QM %s\n",
583                          qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
584
585         return avail;
586 }
587
588 static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
589                            u64 base, u16 queue, bool op)
590 {
591         mailbox->w0 = cpu_to_le16((cmd) |
592                 ((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
593                 (0x1 << QM_MB_BUSY_SHIFT));
594         mailbox->queue_num = cpu_to_le16(queue);
595         mailbox->base_l = cpu_to_le32(lower_32_bits(base));
596         mailbox->base_h = cpu_to_le32(upper_32_bits(base));
597         mailbox->rsvd = 0;
598 }
599
600 /* Return 0 when the mailbox is ready, or -ETIMEDOUT on hardware timeout */
601 static int qm_wait_mb_ready(struct hisi_qm *qm)
602 {
603         u32 val;
604
605         return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
606                                           val, !((val >> QM_MB_BUSY_SHIFT) &
607                                           0x1), POLL_PERIOD, POLL_TIMEOUT);
608 }
609
610 /* The 128-bit message must be written to hardware in one shot to trigger a mailbox */
611 static void qm_mb_write(struct hisi_qm *qm, const void *src)
612 {
613         void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
614         unsigned long tmp0 = 0, tmp1 = 0;
615
616         if (!IS_ENABLED(CONFIG_ARM64)) {
617                 memcpy_toio(fun_base, src, 16);
618                 wmb();
619                 return;
620         }
621
622         asm volatile("ldp %0, %1, %3\n"
623                      "stp %0, %1, %2\n"
624                      "dsb sy\n"
625                      : "=&r" (tmp0),
626                        "=&r" (tmp1),
627                        "+Q" (*((char __iomem *)fun_base))
628                      : "Q" (*((char *)src))
629                      : "memory");
630 }
631
632 static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
633 {
634         if (unlikely(qm_wait_mb_ready(qm))) {
635                 dev_err(&qm->pdev->dev, "QM mailbox is busy, cannot start operation!\n");
636                 goto mb_busy;
637         }
638
639         qm_mb_write(qm, mailbox);
640
641         if (unlikely(qm_wait_mb_ready(qm))) {
642                 dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
643                 goto mb_busy;
644         }
645
646         return 0;
647
648 mb_busy:
649         atomic64_inc(&qm->debug.dfx.mb_err_cnt);
650         return -EBUSY;
651 }
652
653 static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
654                  bool op)
655 {
656         struct qm_mailbox mailbox;
657         int ret;
658
659         dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
660                 queue, cmd, (unsigned long long)dma_addr);
661
662         qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);
663
664         mutex_lock(&qm->mailbox_lock);
665         ret = qm_mb_nolock(qm, &mailbox);
666         mutex_unlock(&qm->mailbox_lock);
667
668         return ret;
669 }
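/*
 * Example (sketch): reading back the SQC of queue qp_id into a DMA
 * buffer, as the dump helpers further below do; op = 1 selects a read
 * from hardware:
 *
 *	ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1);
 */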
670
671 static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
672 {
673         u64 doorbell;
674
675         doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
676                    ((u64)index << QM_DB_INDEX_SHIFT_V1)  |
677                    ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);
678
679         writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
680 }
681
682 static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
683 {
684         void __iomem *io_base = qm->io_base;
685         u16 randata = 0;
686         u64 doorbell;
687
688         if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
689                 io_base = qm->db_io_base + (u64)qn * qm->db_interval +
690                           QM_DOORBELL_SQ_CQ_BASE_V2;
691         else
692                 io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;
693
694         doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
695                    ((u64)randata << QM_DB_RAND_SHIFT_V2) |
696                    ((u64)index << QM_DB_INDEX_SHIFT_V2)  |
697                    ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
698
699         writeq(doorbell, io_base);
700 }
701
702 static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
703 {
704         dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
705                 qn, cmd, index);
706
707         qm->ops->qm_db(qm, qn, cmd, index, priority);
708 }
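/*
 * Example (sketch): after consuming a CQE, the CQ doorbell is rung with
 * the new head; priority 1 sets the c_flag, as qm_poll_qp() does below:
 *
 *	qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
 */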
709
710 static int qm_dev_mem_reset(struct hisi_qm *qm)
711 {
712         u32 val;
713
714         writel(0x1, qm->io_base + QM_MEM_START_INIT);
715         return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
716                                           val & BIT(0), POLL_PERIOD,
717                                           POLL_TIMEOUT);
718 }
719
720 static u32 qm_get_irq_num_v1(struct hisi_qm *qm)
721 {
722         return QM_IRQ_NUM_V1;
723 }
724
725 static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
726 {
727         if (qm->fun_type == QM_HW_PF)
728                 return QM_IRQ_NUM_PF_V2;
729         else
730                 return QM_IRQ_NUM_VF_V2;
731 }
732
733 static u32 qm_get_irq_num_v3(struct hisi_qm *qm)
734 {
735         if (qm->fun_type == QM_HW_PF)
736                 return QM_IRQ_NUM_PF_V2;
737
738         return QM_IRQ_NUM_VF_V3;
739 }
740
741 static int qm_pm_get_sync(struct hisi_qm *qm)
742 {
743         struct device *dev = &qm->pdev->dev;
744         int ret;
745
746         if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
747                 return 0;
748
749         ret = pm_runtime_resume_and_get(dev);
750         if (ret < 0) {
751                 dev_err(dev, "failed to get_sync(%d).\n", ret);
752                 return ret;
753         }
754
755         return 0;
756 }
757
758 static void qm_pm_put_sync(struct hisi_qm *qm)
759 {
760         struct device *dev = &qm->pdev->dev;
761
762         if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
763                 return;
764
765         pm_runtime_mark_last_busy(dev);
766         pm_runtime_put_autosuspend(dev);
767 }
768
769 static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
770 {
771         u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
772
773         return &qm->qp_array[cqn];
774 }
775
776 static void qm_cq_head_update(struct hisi_qp *qp)
777 {
778         if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
779                 qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
780                 qp->qp_status.cq_head = 0;
781         } else {
782                 qp->qp_status.cq_head++;
783         }
784 }
785
786 static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
787 {
788         if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
789                 return;
790
791         if (qp->event_cb) {
792                 qp->event_cb(qp);
793                 return;
794         }
795
796         if (qp->req_cb) {
797                 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
798
799                 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
800                         dma_rmb();
801                         qp->req_cb(qp, qp->sqe + qm->sqe_size *
802                                    le16_to_cpu(cqe->sq_head));
803                         qm_cq_head_update(qp);
804                         cqe = qp->cqe + qp->qp_status.cq_head;
805                         qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
806                               qp->qp_status.cq_head, 0);
807                         atomic_dec(&qp->qp_status.used);
808                 }
809
810                 /* set c_flag */
811                 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
812                       qp->qp_status.cq_head, 1);
813         }
814 }
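/*
 * A minimal sketch (hypothetical callback, not from this driver) of how
 * a consumer is wired into the req_cb path above; the callback receives
 * the SQE matching the completed request:
 *
 *	static void my_req_cb(struct hisi_qp *qp, void *sqe)
 *	{
 *		struct my_msg *msg = sqe;	// hypothetical request type
 *
 *		complete(&msg->done);
 *	}
 *
 *	qp->req_cb = my_req_cb;
 */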
815
816 static void qm_work_process(struct work_struct *work)
817 {
818         struct hisi_qm *qm = container_of(work, struct hisi_qm, work);
819         struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
820         struct hisi_qp *qp;
821         int eqe_num = 0;
822
823         while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
824                 eqe_num++;
825                 qp = qm_to_hisi_qp(qm, eqe);
826                 qm_poll_qp(qp, qm);
827
828                 if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
829                         qm->status.eqc_phase = !qm->status.eqc_phase;
830                         eqe = qm->eqe;
831                         qm->status.eq_head = 0;
832                 } else {
833                         eqe++;
834                         qm->status.eq_head++;
835                 }
836
837                 if (eqe_num == QM_EQ_DEPTH / 2 - 1) {
838                         eqe_num = 0;
839                         qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
840                 }
841         }
842
843         qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
844 }
845
846 static irqreturn_t do_qm_irq(int irq, void *data)
847 {
848         struct hisi_qm *qm = (struct hisi_qm *)data;
849
850         /* use the workqueue created by the QM device's driver, if any */
851         if (qm->wq)
852                 queue_work(qm->wq, &qm->work);
853         else
854                 schedule_work(&qm->work);
855
856         return IRQ_HANDLED;
857 }
858
859 static irqreturn_t qm_irq(int irq, void *data)
860 {
861         struct hisi_qm *qm = data;
862
863         if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
864                 return do_qm_irq(irq, data);
865
866         atomic64_inc(&qm->debug.dfx.err_irq_cnt);
867         dev_err(&qm->pdev->dev, "invalid int source\n");
868         qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
869
870         return IRQ_NONE;
871 }
872
873 static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
874 {
875         struct hisi_qm *qm = data;
876         u32 val;
877
878         val = readl(qm->io_base + QM_IFC_INT_STATUS);
879         val &= QM_IFC_INT_STATUS_MASK;
880         if (!val)
881                 return IRQ_NONE;
882
883         schedule_work(&qm->cmd_process);
884
885         return IRQ_HANDLED;
886 }
887
888 static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
889 {
890         u32 *addr;
891
892         if (qp->is_in_kernel)
893                 return;
894
895         addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset;
896         *addr = 1;
897
898         /* make sure setup is completed */
899         mb();
900 }
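/*
 * Example (sketch): during reset, userspace queues can be flagged as
 * disabled via the two words at the end of the queue's DMA area:
 *
 *	qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET);
 *	qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET);
 */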
901
902 static irqreturn_t qm_aeq_irq(int irq, void *data)
903 {
904         struct hisi_qm *qm = data;
905         struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
906         u32 type;
907
908         atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
909         if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
910                 return IRQ_NONE;
911
912         while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
913                 type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
914                 if (type < ARRAY_SIZE(qm_fifo_overflow))
915                         dev_err(&qm->pdev->dev, "%s overflow\n",
916                                 qm_fifo_overflow[type]);
917                 else
918                         dev_err(&qm->pdev->dev, "unknown error type %u\n",
919                                 type);
920
921                 if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
922                         qm->status.aeqc_phase = !qm->status.aeqc_phase;
923                         aeqe = qm->aeqe;
924                         qm->status.aeq_head = 0;
925                 } else {
926                         aeqe++;
927                         qm->status.aeq_head++;
928                 }
929
930                 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
931         }
932
933         return IRQ_HANDLED;
934 }
935
936 static void qm_irq_unregister(struct hisi_qm *qm)
937 {
938         struct pci_dev *pdev = qm->pdev;
939
940         free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
941
942         if (qm->ver > QM_HW_V1) {
943                 free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
944
945                 if (qm->fun_type == QM_HW_PF)
946                         free_irq(pci_irq_vector(pdev,
947                                  QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
948         }
949
950         if (qm->ver > QM_HW_V2)
951                 free_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), qm);
952 }
953
954 static void qm_init_qp_status(struct hisi_qp *qp)
955 {
956         struct hisi_qp_status *qp_status = &qp->qp_status;
957
958         qp_status->sq_tail = 0;
959         qp_status->cq_head = 0;
960         qp_status->cqc_phase = true;
961         atomic_set(&qp_status->used, 0);
962 }
963
964 static void qm_init_prefetch(struct hisi_qm *qm)
965 {
966         struct device *dev = &qm->pdev->dev;
967         u32 page_type = 0x0;
968
969         if (qm->ver < QM_HW_V3)
970                 return;
971
972         switch (PAGE_SIZE) {
973         case SZ_4K:
974                 page_type = 0x0;
975                 break;
976         case SZ_16K:
977                 page_type = 0x1;
978                 break;
979         case SZ_64K:
980                 page_type = 0x2;
981                 break;
982         default:
983                 dev_err(dev, "unsupported system page size: %lu, defaulting to 4KB",
984                         PAGE_SIZE);
985         }
986
987         writel(page_type, qm->io_base + QM_PAGE_SIZE);
988 }
989
990 /*
991  * The shaper rate formula: ir is the rate in Mbps (a user QoS value of 1
992  * maps to ir = 100, i.e. 100 Mbps; ir = 10000 means IR = 10 Gbps):
993  *
994  *                      IR_b * (2 ^ IR_u) * 8
995  * IR(Mbps) * 10 ^ -3 = ---------------------
996  *                      Tick * (2 ^ IR_s)
997  */
998 static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
999 {
1000         return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) /
1001                                         (QM_QOS_TICK * (1 << cir_s));
1002 }
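/*
 * Worked example: with QM_QOS_DIVISOR_CLK = 0x1f40 (8000) and
 * QM_QOS_TICK = 0x300 (768), cir_b = 150, cir_u = 6, cir_s = 0 yields
 * (150 * 8000 * 64) / (768 * 1) = 100000, i.e. 100 Gbps.
 */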
1003
1004 static u32 acc_shaper_calc_cbs_s(u32 ir)
1005 {
1006         int i;
1007
1008         if (ir < typical_qos_val[0])
1009                 return QM_SHAPER_MIN_CBS_S;
1010
1011         for (i = 1; i < QM_QOS_TYPICAL_NUM; i++) {
1012                 if (ir >= typical_qos_val[i - 1] && ir < typical_qos_val[i])
1013                         return typical_qos_cbs_s[i - 1];
1014         }
1015
1016         return typical_qos_cbs_s[QM_QOS_TYPICAL_NUM - 1];
1017 }
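/*
 * Worked example: ir = 499 falls in [250, 500) and returns cbs_s = 10;
 * ir = 500 falls in [500, 1000) and returns cbs_s = 11; anything below
 * 100 returns QM_SHAPER_MIN_CBS_S (8).
 */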
1018
1019 static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
1020 {
1021         u32 cir_b, cir_u, cir_s, ir_calc;
1022         u32 error_rate;
1023
1024         factor->cbs_s = acc_shaper_calc_cbs_s(ir);
1025
1026         for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
1027                 for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
1028                         for (cir_s = 0; cir_s <= QM_QOS_MAX_CIR_S; cir_s++) {
1029                                 /* with DIVISOR_CLK folded in, this becomes:
1030                                  *              IR_b * (2 ^ IR_u) * DIVISOR_CLK
1031                                  * IR(Mbps) = ---------------------------------
1032                                  *                   768 * (2 ^ IR_s)
1033                                  */
1034                                 ir_calc = acc_shaper_para_calc(cir_b, cir_u,
1035                                                                cir_s);
1036                                 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
1037                                 if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
1038                                         factor->cir_b = cir_b;
1039                                         factor->cir_u = cir_u;
1040                                         factor->cir_s = cir_s;
1041
1042                                         return 0;
1043                                 }
1044                         }
1045                 }
1046         }
1047
1048         return -EINVAL;
1049 }
1050
1051 static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
1052                             u32 number, struct qm_shaper_factor *factor)
1053 {
1054         u64 tmp = 0;
1055
1056         if (number > 0) {
1057                 switch (type) {
1058                 case SQC_VFT:
1059                         if (qm->ver == QM_HW_V1) {
1060                                 tmp = QM_SQC_VFT_BUF_SIZE       |
1061                                       QM_SQC_VFT_SQC_SIZE       |
1062                                       QM_SQC_VFT_INDEX_NUMBER   |
1063                                       QM_SQC_VFT_VALID          |
1064                                       (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
1065                         } else {
1066                                 tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
1067                                       QM_SQC_VFT_VALID |
1068                                       (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
1069                         }
1070                         break;
1071                 case CQC_VFT:
1072                         if (qm->ver == QM_HW_V1) {
1073                                 tmp = QM_CQC_VFT_BUF_SIZE       |
1074                                       QM_CQC_VFT_SQC_SIZE       |
1075                                       QM_CQC_VFT_INDEX_NUMBER   |
1076                                       QM_CQC_VFT_VALID;
1077                         } else {
1078                                 tmp = QM_CQC_VFT_VALID;
1079                         }
1080                         break;
1081                 case SHAPER_VFT:
1082                         if (qm->ver >= QM_HW_V3) {
1083                                 tmp = factor->cir_b |
1084                                 (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
1085                                 (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
1086                                 (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
1087                                 (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
1088                         }
1089                         break;
1090                 }
1091         }
1092
1093         writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
1094         writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
1095 }
1096
1097 static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
1098                              u32 fun_num, u32 base, u32 number)
1099 {
1100         struct qm_shaper_factor *factor = &qm->factor[fun_num];
1101         unsigned int val;
1102         int ret;
1103
1104         ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
1105                                          val & BIT(0), POLL_PERIOD,
1106                                          POLL_TIMEOUT);
1107         if (ret)
1108                 return ret;
1109
1110         writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
1111         writel(type, qm->io_base + QM_VFT_CFG_TYPE);
1112         if (type == SHAPER_VFT)
1113                 fun_num |= base << QM_SHAPER_VFT_OFFSET;
1114
1115         writel(fun_num, qm->io_base + QM_VFT_CFG);
1116
1117         qm_vft_data_cfg(qm, type, base, number, factor);
1118
1119         writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
1120         writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
1121
1122         return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
1123                                           val & BIT(0), POLL_PERIOD,
1124                                           POLL_TIMEOUT);
1125 }
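/*
 * VFT programming sequence used above: wait for QM_VFT_CFG_RDY, select
 * write mode and table type, write the function number, load the 64-bit
 * entry into QM_VFT_CFG_DATA_L/H, pulse QM_VFT_CFG_OP_ENABLE, then wait
 * for ready again.
 */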
1126
1127 static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
1128 {
1129         int ret, i;
1130
1131         qm->factor[fun_num].func_qos = QM_QOS_MAX_VAL;
1132         ret = qm_get_shaper_para(QM_QOS_MAX_VAL * QM_QOS_RATE, &qm->factor[fun_num]);
1133         if (ret) {
1134                 dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
1135                 return ret;
1136         }
1137         writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
1138         for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
1139                 /* The base number of queues is reused for different alg types */
1140                 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
1141                 if (ret)
1142                         return ret;
1143         }
1144
1145         return 0;
1146 }
1147
1148 /* This configuration must be done after qm_dev_mem_reset() */
1149 static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
1150                               u32 number)
1151 {
1152         int ret, i;
1153
1154         for (i = SQC_VFT; i <= CQC_VFT; i++) {
1155                 ret = qm_set_vft_common(qm, i, fun_num, base, number);
1156                 if (ret)
1157                         return ret;
1158         }
1159
1160         /* init default shaper qos val */
1161         if (qm->ver >= QM_HW_V3) {
1162                 ret = qm_shaper_init_vft(qm, fun_num);
1163                 if (ret)
1164                         goto back_sqc_cqc;
1165         }
1166
1167         return 0;
1168 back_sqc_cqc:
1169         for (i = SQC_VFT; i <= CQC_VFT; i++) {
1170                 ret = qm_set_vft_common(qm, i, fun_num, 0, 0);
1171                 if (ret)
1172                         return ret;
1173         }
1174         return ret;
1175 }
1176
1177 static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
1178 {
1179         u64 sqc_vft;
1180         int ret;
1181
1182         ret = qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
1183         if (ret)
1184                 return ret;
1185
1186         sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
1187                   ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
1188         *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
1189         *number = (QM_SQC_VFT_NUM_MASK_V2 &
1190                    (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
1191
1192         return 0;
1193 }
1194
1195 static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
1196 {
1197         u32 remain_q_num, vfq_num;
1198         u32 num_vfs = qm->vfs_num;
1199
1200         vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
1201         if (vfq_num >= qm->max_qp_num)
1202                 return qm->max_qp_num;
1203
1204         remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
1205         if (vfq_num + remain_q_num <= qm->max_qp_num)
1206                 return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;
1207
1208         /*
1209          * if vfq_num + remain_q_num > max_qp_num, give each of the last
1210          * remain_q_num VFs one extra queue.
1211          */
1212         return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
1213 }
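/*
 * Worked example (hypothetical numbers): with ctrl_qp_num = 1024,
 * qm->qp_num = 256, num_vfs = 7 and max_qp_num = 256: vfq_num = 768 / 7
 * = 109 and remain_q_num = 5, so VFs 1-6 get 109 queues each and VF 7
 * gets 109 + 5 = 114.
 */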
1214
1215 static struct hisi_qm *file_to_qm(struct debugfs_file *file)
1216 {
1217         struct qm_debug *debug = file->debug;
1218
1219         return container_of(debug, struct hisi_qm, debug);
1220 }
1221
1222 static u32 current_q_read(struct hisi_qm *qm)
1223 {
1224         return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
1225 }
1226
1227 static int current_q_write(struct hisi_qm *qm, u32 val)
1228 {
1229         u32 tmp;
1230
1231         if (val >= qm->debug.curr_qm_qp_num)
1232                 return -EINVAL;
1233
1234         tmp = val << QM_DFX_QN_SHIFT |
1235               (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
1236         writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
1237
1238         tmp = val << QM_DFX_QN_SHIFT |
1239               (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
1240         writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
1241
1242         return 0;
1243 }
1244
1245 static u32 clear_enable_read(struct hisi_qm *qm)
1246 {
1247         return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
1248 }
1249
1250 /* rd_clr_ctrl: 1 enables read-clear, 0 disables it */
1251 static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl)
1252 {
1253         if (rd_clr_ctrl > 1)
1254                 return -EINVAL;
1255
1256         writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);
1257
1258         return 0;
1259 }
1260
1261 static u32 current_qm_read(struct hisi_qm *qm)
1262 {
1263         return readl(qm->io_base + QM_DFX_MB_CNT_VF);
1264 }
1265
1266 static int current_qm_write(struct hisi_qm *qm, u32 val)
1267 {
1268         u32 tmp;
1269
1270         if (val > qm->vfs_num)
1271                 return -EINVAL;
1272
1273         /* Calculate and store curr_qm_qp_num according to the PF or VF device ID */
1274         if (!val)
1275                 qm->debug.curr_qm_qp_num = qm->qp_num;
1276         else
1277                 qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);
1278
1279         writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
1280         writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
1281
1282         tmp = val |
1283               (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
1284         writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
1285
1286         tmp = val |
1287               (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
1288         writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
1289
1290         return 0;
1291 }
1292
1293 static ssize_t qm_debug_read(struct file *filp, char __user *buf,
1294                              size_t count, loff_t *pos)
1295 {
1296         struct debugfs_file *file = filp->private_data;
1297         enum qm_debug_file index = file->index;
1298         struct hisi_qm *qm = file_to_qm(file);
1299         char tbuf[QM_DBG_TMP_BUF_LEN];
1300         u32 val;
1301         int ret;
1302
1303         ret = hisi_qm_get_dfx_access(qm);
1304         if (ret)
1305                 return ret;
1306
1307         mutex_lock(&file->lock);
1308         switch (index) {
1309         case CURRENT_QM:
1310                 val = current_qm_read(qm);
1311                 break;
1312         case CURRENT_Q:
1313                 val = current_q_read(qm);
1314                 break;
1315         case CLEAR_ENABLE:
1316                 val = clear_enable_read(qm);
1317                 break;
1318         default:
1319                 goto err_input;
1320         }
1321         mutex_unlock(&file->lock);
1322
1323         hisi_qm_put_dfx_access(qm);
1324         ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
1325         return simple_read_from_buffer(buf, count, pos, tbuf, ret);
1326
1327 err_input:
1328         mutex_unlock(&file->lock);
1329         hisi_qm_put_dfx_access(qm);
1330         return -EINVAL;
1331 }
1332
1333 static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
1334                               size_t count, loff_t *pos)
1335 {
1336         struct debugfs_file *file = filp->private_data;
1337         enum qm_debug_file index = file->index;
1338         struct hisi_qm *qm = file_to_qm(file);
1339         unsigned long val;
1340         char tbuf[QM_DBG_TMP_BUF_LEN];
1341         int len, ret;
1342
1343         if (*pos != 0)
1344                 return 0;
1345
1346         if (count >= QM_DBG_TMP_BUF_LEN)
1347                 return -ENOSPC;
1348
1349         len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
1350                                      count);
1351         if (len < 0)
1352                 return len;
1353
1354         tbuf[len] = '\0';
1355         if (kstrtoul(tbuf, 0, &val))
1356                 return -EFAULT;
1357
1358         ret = hisi_qm_get_dfx_access(qm);
1359         if (ret)
1360                 return ret;
1361
1362         mutex_lock(&file->lock);
1363         switch (index) {
1364         case CURRENT_QM:
1365                 ret = current_qm_write(qm, val);
1366                 break;
1367         case CURRENT_Q:
1368                 ret = current_q_write(qm, val);
1369                 break;
1370         case CLEAR_ENABLE:
1371                 ret = clear_enable_write(qm, val);
1372                 break;
1373         default:
1374                 ret = -EINVAL;
1375         }
1376         mutex_unlock(&file->lock);
1377
1378         hisi_qm_put_dfx_access(qm);
1379
1380         if (ret)
1381                 return ret;
1382
1383         return count;
1384 }
1385
1386 static const struct file_operations qm_debug_fops = {
1387         .owner = THIS_MODULE,
1388         .open = simple_open,
1389         .read = qm_debug_read,
1390         .write = qm_debug_write,
1391 };
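/*
 * These files are exposed through the device's debugfs directory (exact
 * path depends on the accelerator driver; sketch only):
 *
 *	echo 1 > .../qm/current_qm	# select VF 1's counters (0 = PF)
 *	cat .../qm/current_qm
 *	echo 1 > .../qm/clear_enable	# enable read-clear on XXX_CNT regs
 */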
1392
1393 #define CNT_CYC_REGS_NUM                10
1394 static const struct debugfs_reg32 qm_dfx_regs[] = {
1395         /* XXX_CNT are read-clear registers */
1396         {"QM_ECC_1BIT_CNT               ",  0x104000ull},
1397         {"QM_ECC_MBIT_CNT               ",  0x104008ull},
1398         {"QM_DFX_MB_CNT                 ",  0x104018ull},
1399         {"QM_DFX_DB_CNT                 ",  0x104028ull},
1400         {"QM_DFX_SQE_CNT                ",  0x104038ull},
1401         {"QM_DFX_CQE_CNT                ",  0x104048ull},
1402         {"QM_DFX_SEND_SQE_TO_ACC_CNT    ",  0x104050ull},
1403         {"QM_DFX_WB_SQE_FROM_ACC_CNT    ",  0x104058ull},
1404         {"QM_DFX_ACC_FINISH_CNT         ",  0x104060ull},
1405         {"QM_DFX_CQE_ERR_CNT            ",  0x1040b4ull},
1406         {"QM_DFX_FUNS_ACTIVE_ST         ",  0x200ull},
1407         {"QM_ECC_1BIT_INF               ",  0x104004ull},
1408         {"QM_ECC_MBIT_INF               ",  0x10400cull},
1409         {"QM_DFX_ACC_RDY_VLD0           ",  0x1040a0ull},
1410         {"QM_DFX_ACC_RDY_VLD1           ",  0x1040a4ull},
1411         {"QM_DFX_AXI_RDY_VLD            ",  0x1040a8ull},
1412         {"QM_DFX_FF_ST0                 ",  0x1040c8ull},
1413         {"QM_DFX_FF_ST1                 ",  0x1040ccull},
1414         {"QM_DFX_FF_ST2                 ",  0x1040d0ull},
1415         {"QM_DFX_FF_ST3                 ",  0x1040d4ull},
1416         {"QM_DFX_FF_ST4                 ",  0x1040d8ull},
1417         {"QM_DFX_FF_ST5                 ",  0x1040dcull},
1418         {"QM_DFX_FF_ST6                 ",  0x1040e0ull},
1419         {"QM_IN_IDLE_ST                 ",  0x1040e4ull},
1420 };
1421
1422 static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
1423         {"QM_DFX_FUNS_ACTIVE_ST         ",  0x200ull},
1424 };
1425
1426 /**
1427  * hisi_qm_regs_dump() - Dump registers' values.
1428  * @s: debugfs file handle.
1429  * @regset: accelerator registers information.
1430  *
1431  * Dump accelerator registers.
1432  */
1433 void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset)
1434 {
1435         struct pci_dev *pdev = to_pci_dev(regset->dev);
1436         struct hisi_qm *qm = pci_get_drvdata(pdev);
1437         const struct debugfs_reg32 *regs = regset->regs;
1438         int regs_len = regset->nregs;
1439         int i, ret;
1440         u32 val;
1441
1442         ret = hisi_qm_get_dfx_access(qm);
1443         if (ret)
1444                 return;
1445
1446         for (i = 0; i < regs_len; i++) {
1447                 val = readl(regset->base + regs[i].offset);
1448                 seq_printf(s, "%s= 0x%08x\n", regs[i].name, val);
1449         }
1450
1451         hisi_qm_put_dfx_access(qm);
1452 }
1453 EXPORT_SYMBOL_GPL(hisi_qm_regs_dump);
1454
1455 static int qm_regs_show(struct seq_file *s, void *unused)
1456 {
1457         struct hisi_qm *qm = s->private;
1458         struct debugfs_regset32 regset;
1459
1460         if (qm->fun_type == QM_HW_PF) {
1461                 regset.regs = qm_dfx_regs;
1462                 regset.nregs = ARRAY_SIZE(qm_dfx_regs);
1463         } else {
1464                 regset.regs = qm_vf_dfx_regs;
1465                 regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs);
1466         }
1467
1468         regset.base = qm->io_base;
1469         regset.dev = &qm->pdev->dev;
1470
1471         hisi_qm_regs_dump(s, &regset);
1472
1473         return 0;
1474 }
1475
1476 DEFINE_SHOW_ATTRIBUTE(qm_regs);
1477
1478 static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
1479                            size_t count, loff_t *pos)
1480 {
1481         char buf[QM_DBG_READ_LEN];
1482         int len;
1483
1484         len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
1485                         "Please echo help to cmd to get help information");
1486
1487         return simple_read_from_buffer(buffer, count, pos, buf, len);
1488 }
1489
1490 static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
1491                           dma_addr_t *dma_addr)
1492 {
1493         struct device *dev = &qm->pdev->dev;
1494         void *ctx_addr;
1495
1496         ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
1497         if (!ctx_addr)
1498                 return ERR_PTR(-ENOMEM);
1499
1500         *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
1501         if (dma_mapping_error(dev, *dma_addr)) {
1502                 dev_err(dev, "DMA mapping error!\n");
1503                 kfree(ctx_addr);
1504                 return ERR_PTR(-ENOMEM);
1505         }
1506
1507         return ctx_addr;
1508 }
1509
1510 static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
1511                         const void *ctx_addr, dma_addr_t *dma_addr)
1512 {
1513         struct device *dev = &qm->pdev->dev;
1514
1515         dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
1516         kfree(ctx_addr);
1517 }
1518
1519 static int dump_show(struct hisi_qm *qm, void *info,
1520                      unsigned int info_size, char *info_name)
1521 {
1522         struct device *dev = &qm->pdev->dev;
1523         u8 *info_buf, *info_curr = info;
1524         u32 i;
1525 #define BYTE_PER_DW     4
1526
1527         info_buf = kzalloc(info_size, GFP_KERNEL);
1528         if (!info_buf)
1529                 return -ENOMEM;
1530
1531         for (i = 0; i < info_size; i++, info_curr++) {
1532                 if (i % BYTE_PER_DW == 0)
1533                         info_buf[i + 3UL] = *info_curr;
1534                 else if (i % BYTE_PER_DW == 1)
1535                         info_buf[i + 1UL] = *info_curr;
1536                 else if (i % BYTE_PER_DW == 2)
1537                         info_buf[i - 1] = *info_curr;
1538                 else if (i % BYTE_PER_DW == 3)
1539                         info_buf[i - 3] = *info_curr;
1540         }
1541
1542         dev_info(dev, "%s DUMP\n", info_name);
1543         for (i = 0; i < info_size; i += BYTE_PER_DW) {
1544                 pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
1545                         info_buf[i], info_buf[i + 1UL],
1546                         info_buf[i + 2UL], info_buf[i + 3UL]);
1547         }
1548
1549         kfree(info_buf);
1550
1551         return 0;
1552 }
1553
1554 static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
1555 {
1556         return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
1557 }
1558
1559 static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
1560 {
1561         return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
1562 }
1563
1564 static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
1565 {
1566         struct device *dev = &qm->pdev->dev;
1567         struct qm_sqc *sqc, *sqc_curr;
1568         dma_addr_t sqc_dma;
1569         u32 qp_id;
1570         int ret;
1571
1572         if (!s)
1573                 return -EINVAL;
1574
1575         ret = kstrtou32(s, 0, &qp_id);
1576         if (ret || qp_id >= qm->qp_num) {
1577                 dev_err(dev, "Please input qp num (0-%u)\n", qm->qp_num - 1);
1578                 return -EINVAL;
1579         }
1580
1581         sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
1582         if (IS_ERR(sqc))
1583                 return PTR_ERR(sqc);
1584
1585         ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id);
1586         if (ret) {
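                /* the mailbox dump failed; fall back to the driver's cached SQC */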
1587                 down_read(&qm->qps_lock);
1588                 if (qm->sqc) {
1589                         sqc_curr = qm->sqc + qp_id;
1590
1591                         ret = dump_show(qm, sqc_curr, sizeof(*sqc),
1592                                         "SOFT SQC");
1593                         if (ret)
1594                                 dev_info(dev, "Show soft sqc failed!\n");
1595                 }
1596                 up_read(&qm->qps_lock);
1597
1598                 goto err_free_ctx;
1599         }
1600
1601         ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
1602         if (ret)
1603                 dev_info(dev, "Show hw sqc failed!\n");
1604
1605 err_free_ctx:
1606         qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
1607         return ret;
1608 }
1609
1610 static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
1611 {
1612         struct device *dev = &qm->pdev->dev;
1613         struct qm_cqc *cqc, *cqc_curr;
1614         dma_addr_t cqc_dma;
1615         u32 qp_id;
1616         int ret;
1617
1618         if (!s)
1619                 return -EINVAL;
1620
1621         ret = kstrtou32(s, 0, &qp_id);
1622         if (ret || qp_id >= qm->qp_num) {
1623                 dev_err(dev, "Please input qp num (0-%u)\n", qm->qp_num - 1);
1624                 return -EINVAL;
1625         }
1626
1627         cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
1628         if (IS_ERR(cqc))
1629                 return PTR_ERR(cqc);
1630
1631         ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id);
1632         if (ret) {
1633                 down_read(&qm->qps_lock);
1634                 if (qm->cqc) {
1635                         cqc_curr = qm->cqc + qp_id;
1636
1637                         ret = dump_show(qm, cqc_curr, sizeof(*cqc),
1638                                         "SOFT CQC");
1639                         if (ret)
1640                                 dev_info(dev, "Show soft cqc failed!\n");
1641                 }
1642                 up_read(&qm->qps_lock);
1643
1644                 goto err_free_ctx;
1645         }
1646
1647         ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
1648         if (ret)
1649                 dev_info(dev, "Show hw cqc failed!\n");
1650
1651 err_free_ctx:
1652         qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
1653         return ret;
1654 }
1655
1656 static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
1657                             int cmd, char *name)
1658 {
1659         struct device *dev = &qm->pdev->dev;
1660         dma_addr_t xeqc_dma;
1661         void *xeqc;
1662         int ret;
1663
1664         if (strsep(&s, " ")) {
1665                 dev_err(dev, "Please do not input extra characters!\n");
1666                 return -EINVAL;
1667         }
1668
1669         xeqc = qm_ctx_alloc(qm, size, &xeqc_dma);
1670         if (IS_ERR(xeqc))
1671                 return PTR_ERR(xeqc);
1672
1673         ret = qm_mb(qm, cmd, xeqc_dma, 0, 1);
1674         if (ret)
1675                 goto err_free_ctx;
1676
1677         ret = dump_show(qm, xeqc, size, name);
1678         if (ret)
1679                 dev_info(dev, "Show hw %s failed!\n", name);
1680
1681 err_free_ctx:
1682         qm_ctx_free(qm, size, xeqc, &xeqc_dma);
1683         return ret;
1684 }
1685
1686 static int q_dump_param_parse(struct hisi_qm *qm, char *s,
1687                               u32 *e_id, u32 *q_id)
1688 {
1689         struct device *dev = &qm->pdev->dev;
1690         unsigned int qp_num = qm->qp_num;
1691         char *presult;
1692         int ret;
1693
1694         presult = strsep(&s, " ");
1695         if (!presult) {
1696                 dev_err(dev, "Please input qp number!\n");
1697                 return -EINVAL;
1698         }
1699
1700         ret = kstrtou32(presult, 0, q_id);
1701         if (ret || *q_id >= qp_num) {
1702                 dev_err(dev, "Please input qp num (0-%u)\n", qp_num - 1);
1703                 return -EINVAL;
1704         }
1705
1706         presult = strsep(&s, " ");
1707         if (!presult) {
1708                 dev_err(dev, "Please input sqe number!\n");
1709                 return -EINVAL;
1710         }
1711
1712         ret = kstrtou32(presult, 0, e_id);
1713         if (ret || *e_id >= QM_Q_DEPTH) {
1714                 dev_err(dev, "Please input sqe num (0-%d)\n", QM_Q_DEPTH - 1);
1715                 return -EINVAL;
1716         }
1717
1718         if (strsep(&s, " ")) {
1719                 dev_err(dev, "Please do not input extra characters!\n");
1720                 return -EINVAL;
1721         }
1722
1723         return 0;
1724 }
1725
1726 static int qm_sq_dump(struct hisi_qm *qm, char *s)
1727 {
1728         struct device *dev = &qm->pdev->dev;
1729         void *sqe, *sqe_curr;
1730         struct hisi_qp *qp;
1731         u32 qp_id, sqe_id;
1732         int ret;
1733
1734         ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
1735         if (ret)
1736                 return ret;
1737
1738         sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
1739         if (!sqe)
1740                 return -ENOMEM;
1741
1742         qp = &qm->qp_array[qp_id];
1743         memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
1744         sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
1745         memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
1746                qm->debug.sqe_mask_len);
1747
1748         ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
1749         if (ret)
1750                 dev_info(dev, "Show sqe failed!\n");
1751
1752         kfree(sqe);
1753
1754         return ret;
1755 }
1756
1757 static int qm_cq_dump(struct hisi_qm *qm, char *s)
1758 {
1759         struct device *dev = &qm->pdev->dev;
1760         struct qm_cqe *cqe_curr;
1761         struct hisi_qp *qp;
1762         u32 qp_id, cqe_id;
1763         int ret;
1764
1765         ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
1766         if (ret)
1767                 return ret;
1768
1769         qp = &qm->qp_array[qp_id];
1770         cqe_curr = qp->cqe + cqe_id;
1771         ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
1772         if (ret)
1773                 dev_info(dev, "Show cqe failed!\n");
1774
1775         return ret;
1776 }
1777
1778 static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
1779                           size_t size, char *name)
1780 {
1781         struct device *dev = &qm->pdev->dev;
1782         void *xeqe;
1783         u32 xeqe_id;
1784         int ret;
1785
1786         if (!s)
1787                 return -EINVAL;
1788
1789         ret = kstrtou32(s, 0, &xeqe_id);
1790         if (ret)
1791                 return -EINVAL;
1792
1793         if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) {
1794                 dev_err(dev, "Please input eqe num (0-%d)\n", QM_EQ_DEPTH - 1);
1795                 return -EINVAL;
1796         } else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) {
1797                 dev_err(dev, "Please input aeqe num (0-%d)\n", QM_Q_DEPTH - 1);
1798                 return -EINVAL;
1799         }
1800
1801         down_read(&qm->qps_lock);
1802
1803         if (qm->eqe && !strcmp(name, "EQE")) {
1804                 xeqe = qm->eqe + xeqe_id;
1805         } else if (qm->aeqe && !strcmp(name, "AEQE")) {
1806                 xeqe = qm->aeqe + xeqe_id;
1807         } else {
1808                 ret = -EINVAL;
1809                 goto err_unlock;
1810         }
1811
1812         ret = dump_show(qm, xeqe, size, name);
1813         if (ret)
1814                 dev_info(dev, "Show %s failed!\n", name);
1815
1816 err_unlock:
1817         up_read(&qm->qps_lock);
1818         return ret;
1819 }
1820
1821 static int qm_dbg_help(struct hisi_qm *qm, char *s)
1822 {
1823         struct device *dev = &qm->pdev->dev;
1824
1825         if (strsep(&s, " ")) {
1826                 dev_err(dev, "Please do not input extra characters!\n");
1827                 return -EINVAL;
1828         }
1829
1830         dev_info(dev, "available commands:\n");
1831         dev_info(dev, "sqc <num>\n");
1832         dev_info(dev, "cqc <num>\n");
1833         dev_info(dev, "eqc\n");
1834         dev_info(dev, "aeqc\n");
1835         dev_info(dev, "sq <num> <e>\n");
1836         dev_info(dev, "cq <num> <e>\n");
1837         dev_info(dev, "eq <e>\n");
1838         dev_info(dev, "aeq <e>\n");
1839
1840         return 0;
1841 }
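
/*
 * Example debugfs session (a sketch: the exact location of the "cmd" file
 * depends on the parent driver's debugfs layout, so the path below is an
 * assumption; the dumped words land in the kernel log):
 *
 *      # echo "sqc 0" > /sys/kernel/debug/<device>/qm/cmd
 *      # dmesg | tail
 *        ... SQC DUMP
 *        ... DW0: 0000 0000
 *        ...
 */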
1842
1843 static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
1844 {
1845         struct device *dev = &qm->pdev->dev;
1846         char *presult, *s, *s_tmp;
1847         int ret;
1848
1849         s = kstrdup(cmd_buf, GFP_KERNEL);
1850         if (!s)
1851                 return -ENOMEM;
1852
1853         s_tmp = s;
1854         presult = strsep(&s, " ");
1855         if (!presult) {
1856                 ret = -EINVAL;
1857                 goto err_buffer_free;
1858         }
1859
1860         if (!strcmp(presult, "sqc"))
1861                 ret = qm_sqc_dump(qm, s);
1862         else if (!strcmp(presult, "cqc"))
1863                 ret = qm_cqc_dump(qm, s);
1864         else if (!strcmp(presult, "eqc"))
1865                 ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
1866                                        QM_MB_CMD_EQC, "EQC");
1867         else if (!strcmp(presult, "aeqc"))
1868                 ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
1869                                        QM_MB_CMD_AEQC, "AEQC");
1870         else if (!strcmp(presult, "sq"))
1871                 ret = qm_sq_dump(qm, s);
1872         else if (!strcmp(presult, "cq"))
1873                 ret = qm_cq_dump(qm, s);
1874         else if (!strcmp(presult, "eq"))
1875                 ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
1876         else if (!strcmp(presult, "aeq"))
1877                 ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
1878         else if (!strcmp(presult, "help"))
1879                 ret = qm_dbg_help(qm, s);
1880         else
1881                 ret = -EINVAL;
1882
1883         if (ret)
1884                 dev_info(dev, "Please echo help\n");
1885
1886 err_buffer_free:
1887         kfree(s_tmp);
1888
1889         return ret;
1890 }
1891
1892 static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
1893                             size_t count, loff_t *pos)
1894 {
1895         struct hisi_qm *qm = filp->private_data;
1896         char *cmd_buf, *cmd_buf_tmp;
1897         int ret;
1898
1899         if (*pos)
1900                 return 0;
1901
1902         ret = hisi_qm_get_dfx_access(qm);
1903         if (ret)
1904                 return ret;
1905
        /* Check whether the instance is being reset. */
        if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
                ret = 0;
                goto put_dfx_access;
        }
1909
1910         if (count > QM_DBG_WRITE_LEN) {
1911                 ret = -ENOSPC;
1912                 goto put_dfx_access;
1913         }
1914
1915         cmd_buf = memdup_user_nul(buffer, count);
1916         if (IS_ERR(cmd_buf)) {
1917                 ret = PTR_ERR(cmd_buf);
1918                 goto put_dfx_access;
1919         }
1920
1921         cmd_buf_tmp = strchr(cmd_buf, '\n');
1922         if (cmd_buf_tmp) {
1923                 *cmd_buf_tmp = '\0';
1924                 count = cmd_buf_tmp - cmd_buf + 1;
1925         }
1926
1927         ret = qm_cmd_write_dump(qm, cmd_buf);
1928         if (ret) {
1929                 kfree(cmd_buf);
1930                 goto put_dfx_access;
1931         }
1932
1933         kfree(cmd_buf);
1934
1935         ret = count;
1936
1937 put_dfx_access:
1938         hisi_qm_put_dfx_access(qm);
1939         return ret;
1940 }
1941
1942 static const struct file_operations qm_cmd_fops = {
1943         .owner = THIS_MODULE,
1944         .open = simple_open,
1945         .read = qm_cmd_read,
1946         .write = qm_cmd_write,
1947 };
1948
1949 static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
1950                                    enum qm_debug_file index)
1951 {
1952         struct debugfs_file *file = qm->debug.files + index;
1953
1954         debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
1955                             &qm_debug_fops);
1956
1957         file->index = index;
1958         mutex_init(&file->lock);
1959         file->debug = &qm->debug;
1960 }
1961
1962 static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
1963 {
1964         writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
1965 }
1966
1967 static void qm_hw_error_cfg(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
1968 {
1969         qm->error_mask = ce | nfe | fe;
1970         /* clear QM hw residual error source */
1971         writel(QM_ABNORMAL_INT_SOURCE_CLR,
1972                qm->io_base + QM_ABNORMAL_INT_SOURCE);
1973
1974         /* configure error type */
1975         writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
1976         writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
1977         writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
1978         writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
1979 }
1980
1981 static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
1982 {
1983         u32 irq_enable = ce | nfe | fe;
1984         u32 irq_unmask = ~irq_enable;
1985
1986         qm_hw_error_cfg(qm, ce, nfe, fe);
1987
1988         irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1989         writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
1990 }
1991
1992 static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
1993 {
1994         writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
1995 }
1996
1997 static void qm_hw_error_init_v3(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
1998 {
1999         u32 irq_enable = ce | nfe | fe;
2000         u32 irq_unmask = ~irq_enable;
2001
2002         qm_hw_error_cfg(qm, ce, nfe, fe);
2003
2004         /* enable master OOO shutdown when a hardware error occurs */
2005         writel(nfe & (~QM_DB_RANDOM_INVALID), qm->io_base + QM_OOO_SHUTDOWN_SEL);
2006
2007         irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
2008         writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
2009 }
2010
2011 static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
2012 {
2013         writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
2014
2015         /* disable master OOO shutdown when a hardware error occurs */
2016         writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
2017 }
2018
2019 static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
2020 {
2021         const struct hisi_qm_hw_error *err;
2022         struct device *dev = &qm->pdev->dev;
2023         u32 reg_val, type, vf_num;
2024         int i;
2025
2026         for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
2027                 err = &qm_hw_error[i];
2028                 if (!(err->int_msk & error_status))
2029                         continue;
2030
2031                 dev_err(dev, "%s [error status=0x%x] found\n",
2032                         err->msg, err->int_msk);
2033
2034                 if (err->int_msk & QM_DB_TIMEOUT) {
2035                         reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
2036                         type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
2037                                QM_DB_TIMEOUT_TYPE_SHIFT;
2038                         vf_num = reg_val & QM_DB_TIMEOUT_VF;
2039                         dev_err(dev, "qm %s doorbell timeout in function %u\n",
2040                                 qm_db_timeout[type], vf_num);
2041                 } else if (err->int_msk & QM_OF_FIFO_OF) {
2042                         reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
2043                         type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
2044                                QM_FIFO_OVERFLOW_TYPE_SHIFT;
2045                         vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
2046
2047                         if (type < ARRAY_SIZE(qm_fifo_overflow))
2048                                 dev_err(dev, "qm %s fifo overflow in function %u\n",
2049                                         qm_fifo_overflow[type], vf_num);
2050                         else
2051                                 dev_err(dev, "unknown error type\n");
2052                 }
2053         }
2054 }
2055
2056 static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
2057 {
2058         u32 error_status, tmp, val;
2059
2060         /* read err sts */
2061         tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
2062         error_status = qm->error_mask & tmp;
2063
2064         if (error_status) {
2065                 if (error_status & QM_ECC_MBIT)
2066                         qm->err_status.is_qm_ecc_mbit = true;
2067
2068                 qm_log_hw_error(qm, error_status);
2069                 val = error_status | QM_DB_RANDOM_INVALID | QM_BASE_CE;
2070                 /* CE and doorbell-random-invalid errors are recoverable without a reset */
2071                 if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
2072                         writel(error_status, qm->io_base +
2073                                QM_ABNORMAL_INT_SOURCE);
2074                         writel(qm->err_info.nfe,
2075                                qm->io_base + QM_RAS_NFE_ENABLE);
2076                         return ACC_ERR_RECOVERED;
2077                 }
2078
2079                 return ACC_ERR_NEED_RESET;
2080         }
2081
2082         return ACC_ERR_RECOVERED;
2083 }
2084
2085 static u32 qm_get_hw_error_status(struct hisi_qm *qm)
2086 {
2087         return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
2088 }
2089
2090 static u32 qm_get_dev_err_status(struct hisi_qm *qm)
2091 {
2092         return qm->err_ini->get_dev_hw_err_status(qm);
2093 }
2094
2095 /* Check whether the error has blocked the master OOO */
2096 static int qm_check_dev_error(struct hisi_qm *qm)
2097 {
2098         u32 val, dev_val;
2099
2100         if (qm->fun_type == QM_HW_VF)
2101                 return 0;
2102
2103         val = qm_get_hw_error_status(qm);
2104         dev_val = qm_get_dev_err_status(qm);
2105
2106         if (qm->ver < QM_HW_V3)
2107                 return (val & QM_ECC_MBIT) ||
2108                        (dev_val & qm->err_info.ecc_2bits_mask);
2109
2110         return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) ||
2111                (dev_val & (~qm->err_info.dev_ce_mask));
2112 }
2113
2114 static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
2115 {
2116         struct qm_mailbox mailbox;
2117         int ret;
2118
2119         qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0);
2120         mutex_lock(&qm->mailbox_lock);
2121         ret = qm_mb_nolock(qm, &mailbox);
2122         if (ret)
2123                 goto err_unlock;
2124
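        /* the 64-bit message is split across two 32-bit data registers */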
2125         *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
2126                   ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
2127
2128 err_unlock:
2129         mutex_unlock(&qm->mailbox_lock);
2130         return ret;
2131 }
2132
2133 static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
2134 {
2135         u32 val;
2136
2137         if (qm->fun_type == QM_HW_PF)
2138                 writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P);
2139
2140         val = readl(qm->io_base + QM_IFC_INT_SOURCE_V);
2141         val |= QM_IFC_INT_SOURCE_MASK;
2142         writel(val, qm->io_base + QM_IFC_INT_SOURCE_V);
2143 }
2144
2145 static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
2146 {
2147         struct device *dev = &qm->pdev->dev;
2148         u32 cmd;
2149         u64 msg;
2150         int ret;
2151
2152         ret = qm_get_mb_cmd(qm, &msg, vf_id);
2153         if (ret) {
2154                 dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
2155                 return;
2156         }
2157
2158         cmd = msg & QM_MB_CMD_DATA_MASK;
2159         switch (cmd) {
2160         case QM_VF_PREPARE_FAIL:
2161                 dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
2162                 break;
2163         case QM_VF_START_FAIL:
2164                 dev_err(dev, "failed to start VF(%u)!\n", vf_id);
2165                 break;
2166         case QM_VF_PREPARE_DONE:
2167         case QM_VF_START_DONE:
2168                 break;
2169         default:
2170                 dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
2171                 break;
2172         }
2173 }
2174
2175 static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
2176 {
2177         struct device *dev = &qm->pdev->dev;
2178         u32 vfs_num = qm->vfs_num;
2179         int cnt = 0;
2180         int ret = 0;
2181         u64 val;
2182         u32 i;
2183
2184         if (!qm->vfs_num || qm->ver < QM_HW_V3)
2185                 return 0;
2186
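        /*
         * Bit i of QM_IFC_INT_SOURCE_P is set once VF i has pinged the PF
         * (VF numbering starts at 1), so wait until bits 1..vfs_num are all
         * set or the wait count expires.
         */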
2187         while (true) {
2188                 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
2189                 /* break once all VFs have sent their commands to the PF */
2190                 if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1))
2191                         break;
2192
2193                 if (++cnt > QM_MAX_PF_WAIT_COUNT) {
2194                         ret = -EBUSY;
2195                         break;
2196                 }
2197
2198                 msleep(QM_WAIT_DST_ACK);
2199         }
2200
2201         /* the PF checks each VF's message */
2202         for (i = 1; i <= vfs_num; i++) {
2203                 if (val & BIT(i))
2204                         qm_handle_vf_msg(qm, i);
2205                 else
2206                         dev_err(dev, "VF(%u) did not ping PF!\n", i);
2207         }
2208
2209         /* PF clear interrupt to ack VFs */
2210         qm_clear_cmd_interrupt(qm, val);
2211
2212         return ret;
2213 }
2214
2215 static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num)
2216 {
2217         u32 val;
2218
2219         val = readl(qm->io_base + QM_IFC_INT_CFG);
2220         val &= ~QM_IFC_SEND_ALL_VFS;
2221         val |= fun_num;
2222         writel(val, qm->io_base + QM_IFC_INT_CFG);
2223
2224         val = readl(qm->io_base + QM_IFC_INT_SET_P);
2225         val |= QM_IFC_INT_SET_MASK;
2226         writel(val, qm->io_base + QM_IFC_INT_SET_P);
2227 }
2228
2229 static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
2230 {
2231         u32 val;
2232
2233         val = readl(qm->io_base + QM_IFC_INT_SET_V);
2234         val |= QM_IFC_INT_SET_MASK;
2235         writel(val, qm->io_base + QM_IFC_INT_SET_V);
2236 }
2237
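/*
 * PF-to-VF ping protocol: write the command into the mailbox, raise the
 * target VF's IFC interrupt, then poll QM_IFC_READY_STATUS until the VF
 * clears its bit (the ack) or the wait count expires.
 */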
2238 static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
2239 {
2240         struct device *dev = &qm->pdev->dev;
2241         struct qm_mailbox mailbox;
2242         int cnt = 0;
2243         u64 val;
2244         int ret;
2245
2246         qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
2247         mutex_lock(&qm->mailbox_lock);
2248         ret = qm_mb_nolock(qm, &mailbox);
2249         if (ret) {
2250                 dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
2251                 goto err_unlock;
2252         }
2253
2254         qm_trigger_vf_interrupt(qm, fun_num);
2255         while (true) {
2256                 msleep(QM_WAIT_DST_ACK);
2257                 val = readq(qm->io_base + QM_IFC_READY_STATUS);
2258                 /* if the VF has acked, the PF notified it successfully */
2259                 if (!(val & BIT(fun_num)))
2260                         goto err_unlock;
2261
2262                 if (++cnt > QM_MAX_PF_WAIT_COUNT) {
2263                         dev_err(dev, "failed to get response from VF(%u)!\n", fun_num);
2264                         ret = -ETIMEDOUT;
2265                         break;
2266                 }
2267         }
2268
2269 err_unlock:
2270         mutex_unlock(&qm->mailbox_lock);
2271         return ret;
2272 }
2273
2274 static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
2275 {
2276         struct device *dev = &qm->pdev->dev;
2277         u32 vfs_num = qm->vfs_num;
2278         struct qm_mailbox mailbox;
2279         u64 val = 0;
2280         int cnt = 0;
2281         int ret;
2282         u32 i;
2283
2284         qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
2285         mutex_lock(&qm->mailbox_lock);
2286         /* PF sends command to all VFs by mailbox */
2287         ret = qm_mb_nolock(qm, &mailbox);
2288         if (ret) {
2289                 dev_err(dev, "failed to send command to VFs!\n");
2290                 mutex_unlock(&qm->mailbox_lock);
2291                 return ret;
2292         }
2293
2294         qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS);
2295         while (true) {
2296                 msleep(QM_WAIT_DST_ACK);
2297                 val = readq(qm->io_base + QM_IFC_READY_STATUS);
2298                 /* if all VFs have acked, the PF notified them successfully */
2299                 if (!(val & GENMASK(vfs_num, 1))) {
2300                         mutex_unlock(&qm->mailbox_lock);
2301                         return 0;
2302                 }
2303
2304                 if (++cnt > QM_MAX_PF_WAIT_COUNT)
2305                         break;
2306         }
2307
2308         mutex_unlock(&qm->mailbox_lock);
2309
2310         /* report which VFs failed to respond in time */
2311         for (i = 1; i <= vfs_num; i++) {
2312                 if (val & BIT(i))
2313                         dev_err(dev, "failed to get response from VF(%u)!\n", i);
2314         }
2315
2316         return -ETIMEDOUT;
2317 }
2318
2319 static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
2320 {
2321         struct qm_mailbox mailbox;
2322         int cnt = 0;
2323         u32 val;
2324         int ret;
2325
2326         qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
2327         mutex_lock(&qm->mailbox_lock);
2328         ret = qm_mb_nolock(qm, &mailbox);
2329         if (ret) {
2330                 dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
2331                 goto unlock;
2332         }
2333
2334         qm_trigger_pf_interrupt(qm);
2335         /* Waiting for PF response */
2336         while (true) {
2337                 msleep(QM_WAIT_DST_ACK);
2338                 val = readl(qm->io_base + QM_IFC_INT_SET_V);
2339                 if (!(val & QM_IFC_INT_STATUS_MASK))
2340                         break;
2341
2342                 if (++cnt > QM_MAX_VF_WAIT_COUNT) {
2343                         ret = -ETIMEDOUT;
2344                         break;
2345                 }
2346         }
2347
2348 unlock:
2349         mutex_unlock(&qm->mailbox_lock);
2350         return ret;
2351 }
2352
2353 static int qm_stop_qp(struct hisi_qp *qp)
2354 {
2355         return qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
2356 }
2357
2358 static int qm_set_msi(struct hisi_qm *qm, bool set)
2359 {
2360         struct pci_dev *pdev = qm->pdev;
2361
2362         if (set) {
2363                 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
2364                                        0);
2365         } else {
2366                 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
2367                                        ACC_PEH_MSI_DISABLE);
2368                 if (qm->err_status.is_qm_ecc_mbit ||
2369                     qm->err_status.is_dev_ecc_mbit)
2370                         return 0;
2371
2372                 mdelay(1);
2373                 if (readl(qm->io_base + QM_PEH_DFX_INFO0))
2374                         return -EFAULT;
2375         }
2376
2377         return 0;
2378 }
2379
2380 static void qm_wait_msi_finish(struct hisi_qm *qm)
2381 {
2382         struct pci_dev *pdev = qm->pdev;
2383         u32 cmd = ~0;
2384         int cnt = 0;
2385         u32 val;
2386         int ret;
2387
2388         while (true) {
2389                 pci_read_config_dword(pdev, pdev->msi_cap +
2390                                       PCI_MSI_PENDING_64, &cmd);
2391                 if (!cmd)
2392                         break;
2393
2394                 if (++cnt > MAX_WAIT_COUNTS) {
2395                         pci_warn(pdev, "failed to empty MSI PENDING!\n");
2396                         break;
2397                 }
2398
2399                 udelay(1);
2400         }
2401
2402         ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0,
2403                                          val, !(val & QM_PEH_DFX_MASK),
2404                                          POLL_PERIOD, POLL_TIMEOUT);
2405         if (ret)
2406                 pci_warn(pdev, "failed to empty PEH MSI!\n");
2407
2408         ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1,
2409                                          val, !(val & QM_PEH_MSI_FINISH_MASK),
2410                                          POLL_PERIOD, POLL_TIMEOUT);
2411         if (ret)
2412                 pci_warn(pdev, "failed to finish MSI operation!\n");
2413 }
2414
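/*
 * HW v3 (Kunpeng930) toggles MSI through the standard MSI capability
 * enable bit: on enable, poll until the bit reads back as set; on
 * disable, wait for any pending MSIs to drain before returning.
 */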
2415 static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
2416 {
2417         struct pci_dev *pdev = qm->pdev;
2418         int ret = -ETIMEDOUT;
2419         u32 cmd, i;
2420
2421         pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
2422         if (set)
2423                 cmd |= QM_MSI_CAP_ENABLE;
2424         else
2425                 cmd &= ~QM_MSI_CAP_ENABLE;
2426
2427         pci_write_config_dword(pdev, pdev->msi_cap, cmd);
2428         if (set) {
2429                 for (i = 0; i < MAX_WAIT_COUNTS; i++) {
2430                         pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
2431                         if (cmd & QM_MSI_CAP_ENABLE)
2432                                 return 0;
2433
2434                         udelay(1);
2435                 }
2436         } else {
2437                 udelay(WAIT_PERIOD_US_MIN);
2438                 qm_wait_msi_finish(qm);
2439                 ret = 0;
2440         }
2441
2442         return ret;
2443 }
2444
2445 static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
2446         .qm_db = qm_db_v1,
2447         .get_irq_num = qm_get_irq_num_v1,
2448         .hw_error_init = qm_hw_error_init_v1,
2449         .set_msi = qm_set_msi,
2450 };
2451
2452 static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
2453         .get_vft = qm_get_vft_v2,
2454         .qm_db = qm_db_v2,
2455         .get_irq_num = qm_get_irq_num_v2,
2456         .hw_error_init = qm_hw_error_init_v2,
2457         .hw_error_uninit = qm_hw_error_uninit_v2,
2458         .hw_error_handle = qm_hw_error_handle_v2,
2459         .set_msi = qm_set_msi,
2460 };
2461
2462 static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
2463         .get_vft = qm_get_vft_v2,
2464         .qm_db = qm_db_v2,
2465         .get_irq_num = qm_get_irq_num_v3,
2466         .hw_error_init = qm_hw_error_init_v3,
2467         .hw_error_uninit = qm_hw_error_uninit_v3,
2468         .hw_error_handle = qm_hw_error_handle_v2,
2469         .stop_qp = qm_stop_qp,
2470         .set_msi = qm_set_msi_v3,
2471         .ping_all_vfs = qm_ping_all_vfs,
2472         .ping_pf = qm_ping_pf,
2473 };
2474
2475 static void *qm_get_avail_sqe(struct hisi_qp *qp)
2476 {
2477         struct hisi_qp_status *qp_status = &qp->qp_status;
2478         u16 sq_tail = qp_status->sq_tail;
2479
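        /*
         * One slot is left unused: the queue is treated as full once
         * QM_Q_DEPTH - 1 entries are outstanding.
         */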
2480         if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1))
2481                 return NULL;
2482
2483         return qp->sqe + sq_tail * qp->qm->sqe_size;
2484 }
2485
2486 static void hisi_qm_unset_hw_reset(struct hisi_qp *qp)
2487 {
2488         u64 *addr;
2489
2490         /* The last 64 bits of DUS hold the reset status; clear it. */
2491         addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET;
2492         *addr = 0;
2493 }
2494
2495 static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
2496 {
2497         struct device *dev = &qm->pdev->dev;
2498         struct hisi_qp *qp;
2499         int qp_id;
2500
2501         if (!qm_qp_avail_state(qm, NULL, QP_INIT))
2502                 return ERR_PTR(-EPERM);
2503
2504         if (qm->qp_in_used == qm->qp_num) {
2505                 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
2506                                      qm->qp_num);
2507                 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
2508                 return ERR_PTR(-EBUSY);
2509         }
2510
2511         qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
2512         if (qp_id < 0) {
2513                 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
2514                                      qm->qp_num);
2515                 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
2516                 return ERR_PTR(-EBUSY);
2517         }
2518
2519         qp = &qm->qp_array[qp_id];
2520         hisi_qm_unset_hw_reset(qp);
2521         memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
2522
2523         qp->event_cb = NULL;
2524         qp->req_cb = NULL;
2525         qp->qp_id = qp_id;
2526         qp->alg_type = alg_type;
2527         qp->is_in_kernel = true;
2528         qm->qp_in_used++;
2529         atomic_set(&qp->qp_status.flags, QP_INIT);
2530
2531         return qp;
2532 }
2533
2534 /**
2535  * hisi_qm_create_qp() - Create a queue pair from qm.
2536  * @qm: The qm we create a qp from.
2537  * @alg_type: Accelerator specific algorithm type in sqc.
2538  *
2539  * Return the created qp on success, ERR_PTR(-EBUSY) if all qps in the qm
2540  * are allocated, or ERR_PTR(-ENOMEM) if allocating qp memory fails.
2541  */
2542 struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
2543 {
2544         struct hisi_qp *qp;
2545         int ret;
2546
2547         ret = qm_pm_get_sync(qm);
2548         if (ret)
2549                 return ERR_PTR(ret);
2550
2551         down_write(&qm->qps_lock);
2552         qp = qm_create_qp_nolock(qm, alg_type);
2553         up_write(&qm->qps_lock);
2554
2555         if (IS_ERR(qp))
2556                 qm_pm_put_sync(qm);
2557
2558         return qp;
2559 }
2560 EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
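
/*
 * Typical qp life cycle for an in-kernel user (a minimal sketch; error
 * handling is mostly elided, and the alg_type of 0 and pasid of 0 are
 * placeholders for accelerator-specific values):
 *
 *      qp = hisi_qm_create_qp(qm, 0);
 *      if (IS_ERR(qp))
 *              return PTR_ERR(qp);
 *      ret = hisi_qm_start_qp(qp, 0);
 *      ret = hisi_qp_send(qp, msg);
 *      hisi_qm_stop_qp(qp);
 *      hisi_qm_release_qp(qp);
 */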
2561
2562 /**
2563  * hisi_qm_release_qp() - Release a qp back to its qm.
2564  * @qp: The qp we want to release.
2565  *
2566  * This function releases the resource of a qp.
2567  */
2568 void hisi_qm_release_qp(struct hisi_qp *qp)
2569 {
2570         struct hisi_qm *qm = qp->qm;
2571
2572         down_write(&qm->qps_lock);
2573
2574         if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
2575                 up_write(&qm->qps_lock);
2576                 return;
2577         }
2578
2579         qm->qp_in_used--;
2580         idr_remove(&qm->qp_idr, qp->qp_id);
2581
2582         up_write(&qm->qps_lock);
2583
2584         qm_pm_put_sync(qm);
2585 }
2586 EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
2587
2588 static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
2589 {
2590         struct hisi_qm *qm = qp->qm;
2591         struct device *dev = &qm->pdev->dev;
2592         enum qm_hw_ver ver = qm->ver;
2593         struct qm_sqc *sqc;
2594         dma_addr_t sqc_dma;
2595         int ret;
2596
2597         sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
2598         if (!sqc)
2599                 return -ENOMEM;
2600
2601         INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
2602         if (ver == QM_HW_V1) {
2603                 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
2604                 sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
2605         } else {
2606                 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
2607                 sqc->w8 = 0; /* rand_qc */
2608         }
2609         sqc->cq_num = cpu_to_le16(qp_id);
2610         sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
2611
2612         if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
2613                 sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
2614                                        QM_QC_PASID_ENABLE_SHIFT);
2615
2616         sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
2617                                  DMA_TO_DEVICE);
2618         if (dma_mapping_error(dev, sqc_dma)) {
2619                 kfree(sqc);
2620                 return -ENOMEM;
2621         }
2622
2623         ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
2624         dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
2625         kfree(sqc);
2626
2627         return ret;
2628 }
2629
2630 static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
2631 {
2632         struct hisi_qm *qm = qp->qm;
2633         struct device *dev = &qm->pdev->dev;
2634         enum qm_hw_ver ver = qm->ver;
2635         struct qm_cqc *cqc;
2636         dma_addr_t cqc_dma;
2637         int ret;
2638
2639         cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
2640         if (!cqc)
2641                 return -ENOMEM;
2642
2643         INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
2644         if (ver == QM_HW_V1) {
2645                 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
2646                                                         QM_QC_CQE_SIZE));
2647                 cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
2648         } else {
2649                 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE));
2650                 cqc->w8 = 0; /* rand_qc */
2651         }
2652         cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
2653
2654         if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
2655                 cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE);
2656
2657         cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
2658                                  DMA_TO_DEVICE);
2659         if (dma_mapping_error(dev, cqc_dma)) {
2660                 kfree(cqc);
2661                 return -ENOMEM;
2662         }
2663
2664         ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
2665         dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
2666         kfree(cqc);
2667
2668         return ret;
2669 }
2670
2671 static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
2672 {
2673         int ret;
2674
2675         qm_init_qp_status(qp);
2676
2677         ret = qm_sq_ctx_cfg(qp, qp_id, pasid);
2678         if (ret)
2679                 return ret;
2680
2681         return qm_cq_ctx_cfg(qp, qp_id, pasid);
2682 }
2683
2684 static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
2685 {
2686         struct hisi_qm *qm = qp->qm;
2687         struct device *dev = &qm->pdev->dev;
2688         int qp_id = qp->qp_id;
2689         u32 pasid = arg;
2690         int ret;
2691
2692         if (!qm_qp_avail_state(qm, qp, QP_START))
2693                 return -EPERM;
2694
2695         ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
2696         if (ret)
2697                 return ret;
2698
2699         atomic_set(&qp->qp_status.flags, QP_START);
2700         dev_dbg(dev, "queue %d started\n", qp_id);
2701
2702         return 0;
2703 }
2704
2705 /**
2706  * hisi_qm_start_qp() - Start a qp into running.
2707  * @qp: The qp we want to start to run.
2708  * @arg: Accelerator specific argument.
2709  *
2710  * After this function returns, the qp can receive requests from the user.
2711  * Returns 0 on success or a negative error code on failure.
2712  */
2713 int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
2714 {
2715         struct hisi_qm *qm = qp->qm;
2716         int ret;
2717
2718         down_write(&qm->qps_lock);
2719         ret = qm_start_qp_nolock(qp, arg);
2720         up_write(&qm->qps_lock);
2721
2722         return ret;
2723 }
2724 EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
2725
2726 /**
2727  * qp_stop_fail_cb() - call the request callback for outstanding sqes.
2728  * @qp: the qp that failed to stop cleanly.
2729  *
2730  * The request callback must be invoked whether or not the task completed.
2731  */
2732 static void qp_stop_fail_cb(struct hisi_qp *qp)
2733 {
2734         int qp_used = atomic_read(&qp->qp_status.used);
2735         u16 cur_tail = qp->qp_status.sq_tail;
2736         u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH;
2737         struct hisi_qm *qm = qp->qm;
2738         u16 pos;
2739         int i;
2740
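        /*
         * The oldest outstanding sqe sits qp_used slots behind the tail;
         * walk forward from it and complete each outstanding request.
         */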
2741         for (i = 0; i < qp_used; i++) {
2742                 pos = (i + cur_head) % QM_Q_DEPTH;
2743                 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
2744                 atomic_dec(&qp->qp_status.used);
2745         }
2746 }
2747
2748 /**
2749  * qm_drain_qp() - Drain a qp.
2750  * @qp: The qp we want to drain.
2751  *
2752  * Determine whether the queue has been drained by comparing the tail
2753  * pointers of the sq and cq.
2754  */
2755 static int qm_drain_qp(struct hisi_qp *qp)
2756 {
2757         size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc);
2758         struct hisi_qm *qm = qp->qm;
2759         struct device *dev = &qm->pdev->dev;
2760         struct qm_sqc *sqc;
2761         struct qm_cqc *cqc;
2762         dma_addr_t dma_addr;
2763         int ret = 0, i = 0;
2764         void *addr;
2765
2766         /* No need to drain if the master OOO is already blocked by a device error. */
2767         if (qm_check_dev_error(qm))
2768                 return 0;
2769
2770         /* Kunpeng930 supports draining the qp in hardware */
2771         if (qm->ops->stop_qp) {
2772                 ret = qm->ops->stop_qp(qp);
2773                 if (ret)
2774                         dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
2775                 return ret;
2776         }
2777
2778         addr = qm_ctx_alloc(qm, size, &dma_addr);
2779         if (IS_ERR(addr)) {
2780                 dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
2781                 return -ENOMEM;
2782         }
2783
2784         while (++i) {
2785                 ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id);
2786                 if (ret) {
2787                         dev_err_ratelimited(dev, "Failed to dump sqc!\n");
2788                         break;
2789                 }
2790                 sqc = addr;
2791
2792                 ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)),
2793                                       qp->qp_id);
2794                 if (ret) {
2795                         dev_err_ratelimited(dev, "Failed to dump cqc!\n");
2796                         break;
2797                 }
2798                 cqc = addr + sizeof(struct qm_sqc);
2799
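                /*
                 * The qp is drained once the sq and cq tails, including
                 * their tail index bits, match.
                 */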
2800                 if ((sqc->tail == cqc->tail) &&
2801                     (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
2802                         break;
2803
2804                 if (i == MAX_WAIT_COUNTS) {
2805                         dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id);
2806                         ret = -EBUSY;
2807                         break;
2808                 }
2809
2810                 usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
2811         }
2812
2813         qm_ctx_free(qm, size, addr, &dma_addr);
2814
2815         return ret;
2816 }
2817
2818 static int qm_stop_qp_nolock(struct hisi_qp *qp)
2819 {
2820         struct device *dev = &qp->qm->pdev->dev;
2821         int ret;
2822
2823         /*
2824          * A qp may be stopped and released during reset. If the qp was
2825          * already stopped by the reset but still needs to be released,
2826          * clear the is_resetting flag so that this qp will not be
2827          * restarted after the reset completes.
2828          */
2829         if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
2830                 qp->is_resetting = false;
2831                 return 0;
2832         }
2833
2834         if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
2835                 return -EPERM;
2836
2837         atomic_set(&qp->qp_status.flags, QP_STOP);
2838
2839         ret = qm_drain_qp(qp);
2840         if (ret)
2841                 dev_err(dev, "Failed to drain out data for stopping!\n");
2842
2843         if (qp->qm->wq)
2844                 flush_workqueue(qp->qm->wq);
2845         else
2846                 flush_work(&qp->qm->work);
2847
2848         if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
2849                 qp_stop_fail_cb(qp);
2850
2851         dev_dbg(dev, "stop queue %u!\n", qp->qp_id);
2852
2853         return 0;
2854 }
2855
2856 /**
2857  * hisi_qm_stop_qp() - Stop a qp in qm.
2858  * @qp: The qp we want to stop.
2859  *
2860  * This function is the reverse of hisi_qm_start_qp(). Returns 0 on success.
2861  */
2862 int hisi_qm_stop_qp(struct hisi_qp *qp)
2863 {
2864         int ret;
2865
2866         down_write(&qp->qm->qps_lock);
2867         ret = qm_stop_qp_nolock(qp);
2868         up_write(&qp->qm->qps_lock);
2869
2870         return ret;
2871 }
2872 EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
2873
2874 /**
2875  * hisi_qp_send() - Queue up a task in the hardware queue.
2876  * @qp: The qp in which to put the message.
2877  * @msg: The message.
2878  *
2879  * This function will return -EBUSY if qp is currently full, and -EAGAIN
2880  * if qp related qm is resetting.
2881  *
2882  * Note: This function may run concurrently with qm_irq_thread and ACC reset.
2883  *       It has no race with qm_irq_thread. However, an ACC reset may happen
2884  *       during hisi_qp_send; for performance reasons no lock is taken here.
2885  *       This can make the current qm_db send fail, or the sent sqe may never
2886  *       be received. The QM sync/async receive functions should handle the
2887  *       error sqe, and the ACC reset-done handler should clear used sqes to 0.
2888  */
2889 int hisi_qp_send(struct hisi_qp *qp, const void *msg)
2890 {
2891         struct hisi_qp_status *qp_status = &qp->qp_status;
2892         u16 sq_tail = qp_status->sq_tail;
2893         u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
2894         void *sqe = qm_get_avail_sqe(qp);
2895
2896         if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
2897                      atomic_read(&qp->qm->status.flags) == QM_STOP ||
2898                      qp->is_resetting)) {
2899                 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
2900                 return -EAGAIN;
2901         }
2902
2903         if (!sqe)
2904                 return -EBUSY;
2905
2906         memcpy(sqe, msg, qp->qm->sqe_size);
2907
2908         qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
2909         atomic_inc(&qp->qp_status.used);
2910         qp_status->sq_tail = sq_tail_next;
2911
2912         return 0;
2913 }
2914 EXPORT_SYMBOL_GPL(hisi_qp_send);
2915
2916 static void hisi_qm_cache_wb(struct hisi_qm *qm)
2917 {
2918         unsigned int val;
2919
2920         if (qm->ver == QM_HW_V1)
2921                 return;
2922
2923         writel(0x1, qm->io_base + QM_CACHE_WB_START);
2924         if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
2925                                        val, val & BIT(0), POLL_PERIOD,
2926                                        POLL_TIMEOUT))
2927                 dev_err(&qm->pdev->dev, "QM failed to write back sqc cache!\n");
2928 }
2929
2930 static void qm_qp_event_notifier(struct hisi_qp *qp)
2931 {
2932         wake_up_interruptible(&qp->uacce_q->wait);
2933 }
2934
2935 static int hisi_qm_get_available_instances(struct uacce_device *uacce)
2936 {
2937         return hisi_qm_get_free_qp_num(uacce->priv);
2938 }
2939
2940 static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset)
2941 {
2942         int i;
2943
2944         for (i = 0; i < qm->qp_num; i++)
2945                 qm_set_qp_disable(&qm->qp_array[i], offset);
2946 }
2947
2948 static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
2949                                    unsigned long arg,
2950                                    struct uacce_queue *q)
2951 {
2952         struct hisi_qm *qm = uacce->priv;
2953         struct hisi_qp *qp;
2954         u8 alg_type = 0;
2955
2956         qp = hisi_qm_create_qp(qm, alg_type);
2957         if (IS_ERR(qp))
2958                 return PTR_ERR(qp);
2959
2960         q->priv = qp;
2961         q->uacce = uacce;
2962         qp->uacce_q = q;
2963         qp->event_cb = qm_qp_event_notifier;
2964         qp->pasid = arg;
2965         qp->is_in_kernel = false;
2966
2967         return 0;
2968 }
2969
2970 static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
2971 {
2972         struct hisi_qp *qp = q->priv;
2973
2974         hisi_qm_cache_wb(qp->qm);
2975         hisi_qm_release_qp(qp);
2976 }
2977
2978 /* map sq/cq/doorbell to user space */
2979 static int hisi_qm_uacce_mmap(struct uacce_queue *q,
2980                               struct vm_area_struct *vma,
2981                               struct uacce_qfile_region *qfr)
2982 {
2983         struct hisi_qp *qp = q->priv;
2984         struct hisi_qm *qm = qp->qm;
2985         resource_size_t phys_base = qm->db_phys_base +
2986                                     qp->qp_id * qm->db_interval;
2987         size_t sz = vma->vm_end - vma->vm_start;
2988         struct pci_dev *pdev = qm->pdev;
2989         struct device *dev = &pdev->dev;
2990         unsigned long vm_pgoff;
2991         int ret;
2992
2993         switch (qfr->type) {
2994         case UACCE_QFRT_MMIO:
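                /* cap the mapping at the doorbell region size for this HW version */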
2995                 if (qm->ver == QM_HW_V1) {
2996                         if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
2997                                 return -EINVAL;
2998                 } else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) {
2999                         if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
3000                             QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
3001                                 return -EINVAL;
3002                 } else {
3003                         if (sz > qm->db_interval)
3004                                 return -EINVAL;
3005                 }
3006
3007                 vma->vm_flags |= VM_IO;
3008
3009                 return remap_pfn_range(vma, vma->vm_start,
3010                                        phys_base >> PAGE_SHIFT,
3011                                        sz, pgprot_noncached(vma->vm_page_prot));
3012         case UACCE_QFRT_DUS:
3013                 if (sz != qp->qdma.size)
3014                         return -EINVAL;
3015
3016                 /*
3017                  * dma_mmap_coherent() requires vm_pgoff to be 0;
3018                  * restore vm_pgoff to its initial value after mmap()
3019                  */
3020                 vm_pgoff = vma->vm_pgoff;
3021                 vma->vm_pgoff = 0;
3022                 ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
3023                                         qp->qdma.dma, sz);
3024                 vma->vm_pgoff = vm_pgoff;
3025                 return ret;
3026
3027         default:
3028                 return -EINVAL;
3029         }
3030 }
3031
3032 static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
3033 {
3034         struct hisi_qp *qp = q->priv;
3035
3036         return hisi_qm_start_qp(qp, qp->pasid);
3037 }
3038
3039 static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
3040 {
3041         hisi_qm_stop_qp(q->priv);
3042 }
3043
3044 static int hisi_qm_is_q_updated(struct uacce_queue *q)
3045 {
3046         struct hisi_qp *qp = q->priv;
3047         struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
3048         int updated = 0;
3049
3050         while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
3051                 /* make sure to read data from memory */
3052                 dma_rmb();
3053                 qm_cq_head_update(qp);
3054                 cqe = qp->cqe + qp->qp_status.cq_head;
3055                 updated = 1;
3056         }
3057
3058         return updated;
3059 }
3060
3061 static void qm_set_sqctype(struct uacce_queue *q, u16 type)
3062 {
3063         struct hisi_qm *qm = q->uacce->priv;
3064         struct hisi_qp *qp = q->priv;
3065
3066         down_write(&qm->qps_lock);
3067         qp->alg_type = type;
3068         up_write(&qm->qps_lock);
3069 }
3070
3071 static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
3072                                 unsigned long arg)
3073 {
3074         struct hisi_qp *qp = q->priv;
3075         struct hisi_qp_ctx qp_ctx;
3076
3077         if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
3078                 if (copy_from_user(&qp_ctx, (void __user *)arg,
3079                                    sizeof(struct hisi_qp_ctx)))
3080                         return -EFAULT;
3081
3082                 if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
3083                         return -EINVAL;
3084
3085                 qm_set_sqctype(q, qp_ctx.qc_type);
3086                 qp_ctx.id = qp->qp_id;
3087
3088                 if (copy_to_user((void __user *)arg, &qp_ctx,
3089                                  sizeof(struct hisi_qp_ctx)))
3090                         return -EFAULT;
3091         } else {
3092                 return -EINVAL;
3093         }
3094
3095         return 0;
3096 }
3097
3098 static const struct uacce_ops uacce_qm_ops = {
3099         .get_available_instances = hisi_qm_get_available_instances,
3100         .get_queue = hisi_qm_uacce_get_queue,
3101         .put_queue = hisi_qm_uacce_put_queue,
3102         .start_queue = hisi_qm_uacce_start_queue,
3103         .stop_queue = hisi_qm_uacce_stop_queue,
3104         .mmap = hisi_qm_uacce_mmap,
3105         .ioctl = hisi_qm_uacce_ioctl,
3106         .is_q_updated = hisi_qm_is_q_updated,
3107 };
3108
3109 static int qm_alloc_uacce(struct hisi_qm *qm)
3110 {
3111         struct pci_dev *pdev = qm->pdev;
3112         struct uacce_device *uacce;
3113         unsigned long mmio_page_nr;
3114         unsigned long dus_page_nr;
3115         struct uacce_interface interface = {
3116                 .flags = UACCE_DEV_SVA,
3117                 .ops = &uacce_qm_ops,
3118         };
3119         int ret;
3120
3121         ret = strscpy(interface.name, dev_driver_string(&pdev->dev),
3122                       sizeof(interface.name));
3123         if (ret < 0)
3124                 return -ENAMETOOLONG;
3125
3126         uacce = uacce_alloc(&pdev->dev, &interface);
3127         if (IS_ERR(uacce))
3128                 return PTR_ERR(uacce);
3129
3130         if (uacce->flags & UACCE_DEV_SVA) {
3131                 qm->use_sva = true;
3132         } else {
3133                 /* only the SVA case is supported */
3134                 uacce_remove(uacce);
3135                 qm->uacce = NULL;
3136                 return -EINVAL;
3137         }
3138
3139         uacce->is_vf = pdev->is_virtfn;
3140         uacce->priv = qm;
3141         uacce->algs = qm->algs;
3142
3143         if (qm->ver == QM_HW_V1)
3144                 uacce->api_ver = HISI_QM_API_VER_BASE;
3145         else if (qm->ver == QM_HW_V2)
3146                 uacce->api_ver = HISI_QM_API_VER2_BASE;
3147         else
3148                 uacce->api_ver = HISI_QM_API_VER3_BASE;
3149
3150         if (qm->ver == QM_HW_V1)
3151                 mmio_page_nr = QM_DOORBELL_PAGE_NR;
3152         else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation)
3153                 mmio_page_nr = QM_DOORBELL_PAGE_NR +
3154                         QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
3155         else
3156                 mmio_page_nr = qm->db_interval / PAGE_SIZE;
3157
3158         /* Add one more page for device or qp status */
3159         dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
3160                        sizeof(struct qm_cqe) * QM_Q_DEPTH + PAGE_SIZE) >>
3161                                          PAGE_SHIFT;
3162
3163         uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
3164         uacce->qf_pg_num[UACCE_QFRT_DUS]  = dus_page_nr;
3165
3166         qm->uacce = uacce;
3167
3168         return 0;
3169 }
3170
3171 /**
3172  * qm_frozen() - Try to freeze the QM to cut off continuous queue requests.
3173  * If there are users on the QM, return failure without doing anything.
3174  * @qm: The qm to be frozen.
3175  *
3176  * This function freezes the QM, then we can do SRIOV disabling.
3177  */
3178 static int qm_frozen(struct hisi_qm *qm)
3179 {
3180         if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl))
3181                 return 0;
3182
3183         down_write(&qm->qps_lock);
3184
3185         if (!qm->qp_in_used) {
3186                 qm->qp_in_used = qm->qp_num;
3187                 up_write(&qm->qps_lock);
3188                 set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
3189                 return 0;
3190         }
3191
3192         up_write(&qm->qps_lock);
3193
3194         return -EBUSY;
3195 }
3196
3197 static int qm_try_frozen_vfs(struct pci_dev *pdev,
3198                              struct hisi_qm_list *qm_list)
3199 {
3200         struct hisi_qm *qm, *vf_qm;
3201         struct pci_dev *dev;
3202         int ret = 0;
3203
3204         if (!qm_list || !pdev)
3205                 return -EINVAL;
3206
3207         /* Try to freeze all the VFs before disabling SRIOV */
3208         mutex_lock(&qm_list->lock);
3209         list_for_each_entry(qm, &qm_list->list, list) {
3210                 dev = qm->pdev;
3211                 if (dev == pdev)
3212                         continue;
3213                 if (pci_physfn(dev) == pdev) {
3214                         vf_qm = pci_get_drvdata(dev);
3215                         ret = qm_frozen(vf_qm);
3216                         if (ret)
3217                                 goto frozen_fail;
3218                 }
3219         }
3220
3221 frozen_fail:
3222         mutex_unlock(&qm_list->lock);
3223
3224         return ret;
3225 }
3226
3227 /**
3228  * hisi_qm_wait_task_finish() - Wait until the task is finished
3229  * when removing the driver.
3230  * @qm: The qm whose tasks need to finish.
3231  * @qm_list: The list of all available devices.
3232  */
3233 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
3234 {
3235         while (qm_frozen(qm) ||
3236                ((qm->fun_type == QM_HW_PF) &&
3237                qm_try_frozen_vfs(qm->pdev, qm_list))) {
3238                 msleep(WAIT_PERIOD);
3239         }
3240
3241         while (test_bit(QM_RST_SCHED, &qm->misc_ctl) ||
3242                test_bit(QM_RESETTING, &qm->misc_ctl))
3243                 msleep(WAIT_PERIOD);
3244
3245         udelay(REMOVE_WAIT_DELAY);
3246 }
3247 EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
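
/*
 * A minimal sketch of the intended call site (assumed driver context;
 * "xxx_devices" is a hypothetical hisi_qm_list owned by the driver): a PCI
 * ->remove() path drains tasks before tearing the device down.
 *
 *	static void xxx_remove(struct pci_dev *pdev)
 *	{
 *		struct hisi_qm *qm = pci_get_drvdata(pdev);
 *
 *		hisi_qm_wait_task_finish(qm, &xxx_devices);
 *		hisi_qm_stop(qm, QM_NORMAL);
 *		hisi_qm_uninit(qm);
 *	}
 */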
3248
3249 /**
3250  * hisi_qm_get_free_qp_num() - Get the number of free qps in a qm.
3251  * @qm: The qm to query.
3252  *
3253  * This function returns the number of free qps in the qm.
3254  */
3255 int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
3256 {
3257         int ret;
3258
3259         down_read(&qm->qps_lock);
3260         ret = qm->qp_num - qm->qp_in_used;
3261         up_read(&qm->qps_lock);
3262
3263         return ret;
3264 }
3265 EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);
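
/*
 * Illustrative use (a sketch, not a call made in this file): a caller can
 * check the free qp count before creating a queue pair, e.g. to pick the
 * least loaded device. The value is only a snapshot; qps may still be
 * taken between this check and a later hisi_qm_create_qp() call.
 *
 *	if (hisi_qm_get_free_qp_num(qm) > 0)
 *		qp = hisi_qm_create_qp(qm, alg_type);
 */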
3266
3267 static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
3268 {
3269         struct device *dev = &qm->pdev->dev;
3270         struct qm_dma *qdma;
3271         int i;
3272
3273         for (i = num - 1; i >= 0; i--) {
3274                 qdma = &qm->qp_array[i].qdma;
3275                 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
3276         }
3277
3278         kfree(qm->qp_array);
3279 }
3280
3281 static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
3282 {
3283         struct device *dev = &qm->pdev->dev;
3284         size_t off = qm->sqe_size * QM_Q_DEPTH;
3285         struct hisi_qp *qp;
3286
3287         qp = &qm->qp_array[id];
3288         qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
3289                                          GFP_KERNEL);
3290         if (!qp->qdma.va)
3291                 return -ENOMEM;
3292
3293         qp->sqe = qp->qdma.va;
3294         qp->sqe_dma = qp->qdma.dma;
3295         qp->cqe = qp->qdma.va + off;
3296         qp->cqe_dma = qp->qdma.dma + off;
3297         qp->qdma.size = dma_size;
3298         qp->qm = qm;
3299         qp->qp_id = id;
3300
3301         return 0;
3302 }
3303
3304 static void hisi_qm_pre_init(struct hisi_qm *qm)
3305 {
3306         struct pci_dev *pdev = qm->pdev;
3307
3308         if (qm->ver == QM_HW_V1)
3309                 qm->ops = &qm_hw_ops_v1;
3310         else if (qm->ver == QM_HW_V2)
3311                 qm->ops = &qm_hw_ops_v2;
3312         else
3313                 qm->ops = &qm_hw_ops_v3;
3314
3315         pci_set_drvdata(pdev, qm);
3316         mutex_init(&qm->mailbox_lock);
3317         init_rwsem(&qm->qps_lock);
3318         qm->qp_in_used = 0;
3319         qm->misc_ctl = false;
3320         if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V2) {
3321                 if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
3322                         dev_info(&pdev->dev, "_PS0 and _PR0 are not defined\n");
3323         }
3324 }
3325
3326 static void qm_cmd_uninit(struct hisi_qm *qm)
3327 {
3328         u32 val;
3329
3330         if (qm->ver < QM_HW_V3)
3331                 return;
3332
3333         val = readl(qm->io_base + QM_IFC_INT_MASK);
3334         val |= QM_IFC_INT_DISABLE;
3335         writel(val, qm->io_base + QM_IFC_INT_MASK);
3336 }
3337
3338 static void qm_cmd_init(struct hisi_qm *qm)
3339 {
3340         u32 val;
3341
3342         if (qm->ver < QM_HW_V3)
3343                 return;
3344
3345         /* Clear communication interrupt source */
3346         qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR);
3347
3348         /* Enable pf to vf communication reg. */
3349         val = readl(qm->io_base + QM_IFC_INT_MASK);
3350         val &= ~QM_IFC_INT_DISABLE;
3351         writel(val, qm->io_base + QM_IFC_INT_MASK);
3352 }
3353
3354 static void qm_put_pci_res(struct hisi_qm *qm)
3355 {
3356         struct pci_dev *pdev = qm->pdev;
3357
3358         if (qm->use_db_isolation)
3359                 iounmap(qm->db_io_base);
3360
3361         iounmap(qm->io_base);
3362         pci_release_mem_regions(pdev);
3363 }
3364
3365 static void hisi_qm_pci_uninit(struct hisi_qm *qm)
3366 {
3367         struct pci_dev *pdev = qm->pdev;
3368
3369         pci_free_irq_vectors(pdev);
3370         qm_put_pci_res(qm);
3371         pci_disable_device(pdev);
3372 }
3373
3374 /**
3375  * hisi_qm_uninit() - Uninitialize qm.
3376  * @qm: The qm to be uninitialized.
3377  *
3378  * This function uninitializes qm related device resources.
3379  */
3380 void hisi_qm_uninit(struct hisi_qm *qm)
3381 {
3382         struct pci_dev *pdev = qm->pdev;
3383         struct device *dev = &pdev->dev;
3384
3385         qm_cmd_uninit(qm);
3386         kfree(qm->factor);
3387         down_write(&qm->qps_lock);
3388
3389         if (!qm_avail_state(qm, QM_CLOSE)) {
3390                 up_write(&qm->qps_lock);
3391                 return;
3392         }
3393
3394         hisi_qp_memory_uninit(qm, qm->qp_num);
3395         idr_destroy(&qm->qp_idr);
3396
3397         if (qm->qdma.va) {
3398                 hisi_qm_cache_wb(qm);
3399                 dma_free_coherent(dev, qm->qdma.size,
3400                                   qm->qdma.va, qm->qdma.dma);
3401         }
3402
3403         qm_irq_unregister(qm);
3404         hisi_qm_pci_uninit(qm);
3405         if (qm->use_sva) {
3406                 uacce_remove(qm->uacce);
3407                 qm->uacce = NULL;
3408         }
3409
3410         up_write(&qm->qps_lock);
3411 }
3412 EXPORT_SYMBOL_GPL(hisi_qm_uninit);
3413
3414 /**
3415  * hisi_qm_get_vft() - Get vft from a qm.
3416  * @qm: The qm we want to get its vft.
3417  * @base: The base number of queue in vft.
3418  * @number: The number of queues in vft.
3419  *
3420  * We can allocate multiple queues to a qm by configuring virtual function
3421  * table. We get the related configuration by this function. Normally, we
3422  * call this function in the VF driver to get the queue information.
3423  *
3424  * qm hw v1 does not support this interface.
3425  */
3426 int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
3427 {
3428         if (!base || !number)
3429                 return -EINVAL;
3430
3431         if (!qm->ops->get_vft) {
3432                 dev_err(&qm->pdev->dev, "vft read is not supported!\n");
3433                 return -EINVAL;
3434         }
3435
3436         return qm->ops->get_vft(qm, base, number);
3437 }
3438 EXPORT_SYMBOL_GPL(hisi_qm_get_vft);
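
/*
 * A minimal sketch, assuming a VF driver probe path: the VF reads its own
 * slice of the virtual function table to learn which queues the PF
 * assigned to it, then records them as its usable range.
 *
 *	u32 base, number;
 *
 *	ret = hisi_qm_get_vft(qm, &base, &number);
 *	if (ret)
 *		return ret;
 *	qm->qp_base = base;
 *	qm->qp_num = number;
 */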
3439
3440 /**
3441  * hisi_qm_set_vft() - Set vft to a qm.
3442  * @qm: The qm we want to set its vft.
3443  * @fun_num: The function number.
3444  * @base: The base number of queue in vft.
3445  * @number: The number of queues in vft.
3446  *
3447  * This function is always called in the PF driver; it is used to assign
3448  * queues among the PF and VFs.
3449  *
3450  * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
3451  * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
3452  * (VF function number 0x2)
3453  */
3454 static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
3455                     u32 number)
3456 {
3457         u32 max_q_num = qm->ctrl_qp_num;
3458
3459         if (base >= max_q_num || number > max_q_num ||
3460             (base + number) > max_q_num)
3461                 return -EINVAL;
3462
3463         return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
3464 }
3465
3466 static void qm_init_eq_aeq_status(struct hisi_qm *qm)
3467 {
3468         struct hisi_qm_status *status = &qm->status;
3469
3470         status->eq_head = 0;
3471         status->aeq_head = 0;
3472         status->eqc_phase = true;
3473         status->aeqc_phase = true;
3474 }
3475
3476 static int qm_eq_ctx_cfg(struct hisi_qm *qm)
3477 {
3478         struct device *dev = &qm->pdev->dev;
3479         struct qm_eqc *eqc;
3480         dma_addr_t eqc_dma;
3481         int ret;
3482
3483         eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
3484         if (!eqc)
3485                 return -ENOMEM;
3486
3487         eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
3488         eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
3489         if (qm->ver == QM_HW_V1)
3490                 eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
3491         eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
3492
3493         eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
3494                                  DMA_TO_DEVICE);
3495         if (dma_mapping_error(dev, eqc_dma)) {
3496                 kfree(eqc);
3497                 return -ENOMEM;
3498         }
3499
3500         ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
3501         dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
3502         kfree(eqc);
3503
3504         return ret;
3505 }
3506
3507 static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
3508 {
3509         struct device *dev = &qm->pdev->dev;
3510         struct qm_aeqc *aeqc;
3511         dma_addr_t aeqc_dma;
3512         int ret;
3513
3514         aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
3515         if (!aeqc)
3516                 return -ENOMEM;
3517
3518         aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
3519         aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
3520         aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
3521
3522         aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
3523                                   DMA_TO_DEVICE);
3524         if (dma_mapping_error(dev, aeqc_dma)) {
3525                 kfree(aeqc);
3526                 return -ENOMEM;
3527         }
3528
3529         ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
3530         dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
3531         kfree(aeqc);
3532
3533         return ret;
3534 }
3535
3536 static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
3537 {
3538         struct device *dev = &qm->pdev->dev;
3539         int ret;
3540
3541         qm_init_eq_aeq_status(qm);
3542
3543         ret = qm_eq_ctx_cfg(qm);
3544         if (ret) {
3545                 dev_err(dev, "Set eqc failed!\n");
3546                 return ret;
3547         }
3548
3549         return qm_aeq_ctx_cfg(qm);
3550 }
3551
3552 static int __hisi_qm_start(struct hisi_qm *qm)
3553 {
3554         int ret;
3555
3556         WARN_ON(!qm->qdma.va);
3557
3558         if (qm->fun_type == QM_HW_PF) {
3559                 ret = qm_dev_mem_reset(qm);
3560                 if (ret)
3561                         return ret;
3562
3563                 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
3564                 if (ret)
3565                         return ret;
3566         }
3567
3568         ret = qm_eq_aeq_ctx_cfg(qm);
3569         if (ret)
3570                 return ret;
3571
3572         ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
3573         if (ret)
3574                 return ret;
3575
3576         ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
3577         if (ret)
3578                 return ret;
3579
3580         qm_init_prefetch(qm);
3581
3582         writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
3583         writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
3584
3585         return 0;
3586 }
3587
3588 /**
3589  * hisi_qm_start() - start qm
3590  * @qm: The qm to be started.
3591  *
3592  * This function starts a qm, then we can allocate qp from this qm.
3593  */
3594 int hisi_qm_start(struct hisi_qm *qm)
3595 {
3596         struct device *dev = &qm->pdev->dev;
3597         int ret = 0;
3598
3599         down_write(&qm->qps_lock);
3600
3601         if (!qm_avail_state(qm, QM_START)) {
3602                 up_write(&qm->qps_lock);
3603                 return -EPERM;
3604         }
3605
3606         dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);
3607
3608         if (!qm->qp_num) {
3609                 dev_err(dev, "qp_num should not be 0\n");
3610                 ret = -EINVAL;
3611                 goto err_unlock;
3612         }
3613
3614         ret = __hisi_qm_start(qm);
3615         if (!ret)
3616                 atomic_set(&qm->status.flags, QM_START);
3617
3618 err_unlock:
3619         up_write(&qm->qps_lock);
3620         return ret;
3621 }
3622 EXPORT_SYMBOL_GPL(hisi_qm_start);
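
/*
 * Typical bring-up order (a sketch; hisi_qm_init() is defined elsewhere in
 * this file): initialize the qm first, then start it before allocating any
 * qp from it.
 *
 *	ret = hisi_qm_init(qm);
 *	if (ret)
 *		return ret;
 *
 *	ret = hisi_qm_start(qm);
 *	if (ret)
 *		hisi_qm_uninit(qm);
 */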
3623
3624 static int qm_restart(struct hisi_qm *qm)
3625 {
3626         struct device *dev = &qm->pdev->dev;
3627         struct hisi_qp *qp;
3628         int ret, i;
3629
3630         ret = hisi_qm_start(qm);
3631         if (ret < 0)
3632                 return ret;
3633
3634         down_write(&qm->qps_lock);
3635         for (i = 0; i < qm->qp_num; i++) {
3636                 qp = &qm->qp_array[i];
3637                 if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
3638                     qp->is_resetting) {
3639                         ret = qm_start_qp_nolock(qp, 0);
3640                         if (ret < 0) {
3641                                 dev_err(dev, "Failed to start qp%d!\n", i);
3642
3643                                 up_write(&qm->qps_lock);
3644                                 return ret;
3645                         }
3646                         qp->is_resetting = false;
3647                 }
3648         }
3649         up_write(&qm->qps_lock);
3650
3651         return 0;
3652 }
3653
3654 /* Stop started qps in reset flow */
3655 static int qm_stop_started_qp(struct hisi_qm *qm)
3656 {
3657         struct device *dev = &qm->pdev->dev;
3658         struct hisi_qp *qp;
3659         int i, ret;
3660
3661         for (i = 0; i < qm->qp_num; i++) {
3662                 qp = &qm->qp_array[i];
3663                 if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
3664                         qp->is_resetting = true;
3665                         ret = qm_stop_qp_nolock(qp);
3666                         if (ret < 0) {
3667                                 dev_err(dev, "Failed to stop qp%d!\n", i);
3668                                 return ret;
3669                         }
3670                 }
3671         }
3672
3673         return 0;
3674 }
3675
3677 /**
3678  * qm_clear_queues() - Clear all queues memory in a qm.
3679  * @qm: The qm in which the queues will be cleared.
3680  *
3681  * This function clears the memory of all queues in a qm. An accelerator
3682  * reset can use this to clear the queues.
3683  */
3684 static void qm_clear_queues(struct hisi_qm *qm)
3685 {
3686         struct hisi_qp *qp;
3687         int i;
3688
3689         for (i = 0; i < qm->qp_num; i++) {
3690                 qp = &qm->qp_array[i];
3691                 if (qp->is_resetting)
3692                         memset(qp->qdma.va, 0, qp->qdma.size);
3693         }
3694
3695         memset(qm->qdma.va, 0, qm->qdma.size);
3696 }
3697
3698 /**
3699  * hisi_qm_stop() - Stop a qm.
3700  * @qm: The qm which will be stopped.
3701  * @r: The reason to stop qm.
3702  *
3703  * This function stops the qm and its qps, after which the qm cannot accept
3704  * requests. Related resources are not released in this state; use
3705  * hisi_qm_start() to start the qm again.
3706  */
3707 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
3708 {
3709         struct device *dev = &qm->pdev->dev;
3710         int ret = 0;
3711
3712         down_write(&qm->qps_lock);
3713
3714         qm->status.stop_reason = r;
3715         if (!qm_avail_state(qm, QM_STOP)) {
3716                 ret = -EPERM;
3717                 goto err_unlock;
3718         }
3719
3720         if (qm->status.stop_reason == QM_SOFT_RESET ||
3721             qm->status.stop_reason == QM_FLR) {
3722                 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
3723                 ret = qm_stop_started_qp(qm);
3724                 if (ret < 0) {
3725                         dev_err(dev, "Failed to stop started qp!\n");
3726                         goto err_unlock;
3727                 }
3728                 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
3729         }
3730
3731         /* Mask eq and aeq irq */
3732         writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
3733         writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
3734
3735         if (qm->fun_type == QM_HW_PF) {
3736                 ret = hisi_qm_set_vft(qm, 0, 0, 0);
3737                 if (ret < 0) {
3738                         dev_err(dev, "Failed to set vft!\n");
3739                         ret = -EBUSY;
3740                         goto err_unlock;
3741                 }
3742         }
3743
3744         qm_clear_queues(qm);
3745         atomic_set(&qm->status.flags, QM_STOP);
3746
3747 err_unlock:
3748         up_write(&qm->qps_lock);
3749         return ret;
3750 }
3751 EXPORT_SYMBOL_GPL(hisi_qm_stop);
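
/*
 * Illustrative pairing (assumed stop reason from the qm_stop_reason enum):
 * a graceful stop uses QM_NORMAL and keeps resources allocated, so the qm
 * can simply be started again later.
 *
 *	ret = hisi_qm_stop(qm, QM_NORMAL);
 *	if (!ret)
 *		ret = hisi_qm_start(qm);
 */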
3752
3753 static ssize_t qm_status_read(struct file *filp, char __user *buffer,
3754                               size_t count, loff_t *pos)
3755 {
3756         struct hisi_qm *qm = filp->private_data;
3757         char buf[QM_DBG_READ_LEN];
3758         int val, len;
3759
3760         val = atomic_read(&qm->status.flags);
3761         len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
3762
3763         return simple_read_from_buffer(buffer, count, pos, buf, len);
3764 }
3765
3766 static const struct file_operations qm_status_fops = {
3767         .owner = THIS_MODULE,
3768         .open = simple_open,
3769         .read = qm_status_read,
3770 };
3771
3772 static int qm_debugfs_atomic64_set(void *data, u64 val)
3773 {
3774         if (val)
3775                 return -EINVAL;
3776
3777         atomic64_set((atomic64_t *)data, 0);
3778
3779         return 0;
3780 }
3781
3782 static int qm_debugfs_atomic64_get(void *data, u64 *val)
3783 {
3784         *val = atomic64_read((atomic64_t *)data);
3785
3786         return 0;
3787 }
3788
3789 DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
3790                          qm_debugfs_atomic64_set, "%llu\n");
3791
3792 static void qm_hw_error_init(struct hisi_qm *qm)
3793 {
3794         struct hisi_qm_err_info *err_info = &qm->err_info;
3795
3796         if (!qm->ops->hw_error_init) {
3797                 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
3798                 return;
3799         }
3800
3801         qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
3802 }
3803
3804 static void qm_hw_error_uninit(struct hisi_qm *qm)
3805 {
3806         if (!qm->ops->hw_error_uninit) {
3807                 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
3808                 return;
3809         }
3810
3811         qm->ops->hw_error_uninit(qm);
3812 }
3813
3814 static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
3815 {
3816         if (!qm->ops->hw_error_handle) {
3817                 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
3818                 return ACC_ERR_NONE;
3819         }
3820
3821         return qm->ops->hw_error_handle(qm);
3822 }
3823
3824 /**
3825  * hisi_qm_dev_err_init() - Initialize device error configuration.
3826  * @qm: The qm for which we want to do error initialization.
3827  *
3828  * Initialize QM and device error related configuration.
3829  */
3830 void hisi_qm_dev_err_init(struct hisi_qm *qm)
3831 {
3832         if (qm->fun_type == QM_HW_VF)
3833                 return;
3834
3835         qm_hw_error_init(qm);
3836
3837         if (!qm->err_ini->hw_err_enable) {
3838                 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
3839                 return;
3840         }
3841         qm->err_ini->hw_err_enable(qm);
3842 }
3843 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
3844
3845 /**
3846  * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
3847  * @qm: The qm for which we want to do error uninitialization.
3848  *
3849  * Uninitialize QM and device error related configuration.
3850  */
3851 void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
3852 {
3853         if (qm->fun_type == QM_HW_VF)
3854                 return;
3855
3856         qm_hw_error_uninit(qm);
3857
3858         if (!qm->err_ini->hw_err_disable) {
3859                 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
3860                 return;
3861         }
3862         qm->err_ini->hw_err_disable(qm);
3863 }
3864 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
3865
3866 /**
3867  * hisi_qm_free_qps() - free multiple queue pairs.
3868  * @qps: The queue pairs need to be freed.
3869  * @qp_num: The number of queue pairs.
3870  */
3871 void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
3872 {
3873         int i;
3874
3875         if (!qps || qp_num <= 0)
3876                 return;
3877
3878         for (i = qp_num - 1; i >= 0; i--)
3879                 hisi_qm_release_qp(qps[i]);
3880 }
3881 EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
3882
3883 static void free_list(struct list_head *head)
3884 {
3885         struct hisi_qm_resource *res, *tmp;
3886
3887         list_for_each_entry_safe(res, tmp, head, list) {
3888                 list_del(&res->list);
3889                 kfree(res);
3890         }
3891 }
3892
3893 static int hisi_qm_sort_devices(int node, struct list_head *head,
3894                                 struct hisi_qm_list *qm_list)
3895 {
3896         struct hisi_qm_resource *res, *tmp;
3897         struct hisi_qm *qm;
3898         struct list_head *n;
3899         struct device *dev;
3900         int dev_node = 0;
3901
3902         list_for_each_entry(qm, &qm_list->list, list) {
3903                 dev = &qm->pdev->dev;
3904
3905                 if (IS_ENABLED(CONFIG_NUMA)) {
3906                         dev_node = dev_to_node(dev);
3907                         if (dev_node < 0)
3908                                 dev_node = 0;
3909                 }
3910
3911                 res = kzalloc(sizeof(*res), GFP_KERNEL);
3912                 if (!res)
3913                         return -ENOMEM;
3914
3915                 res->qm = qm;
3916                 res->distance = node_distance(dev_node, node);
3917                 n = head;
3918                 list_for_each_entry(tmp, head, list) {
3919                         if (res->distance < tmp->distance) {
3920                                 n = &tmp->list;
3921                                 break;
3922                         }
3923                 }
3924                 list_add_tail(&res->list, n);
3925         }
3926
3927         return 0;
3928 }
3929
3930 /**
3931  * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
3932  * @qm_list: The list of all available devices.
3933  * @qp_num: The number of queue pairs to be created.
3934  * @alg_type: The algorithm type.
3935  * @node: The numa node.
3936  * @qps: The created queue pairs.
3937  *
3938  * This function sorts all available devices by numa distance, then tries
3939  * to create all queue pairs on one device. If no device meets the
3940  * requirements, an error is returned.
3941  */
3942 int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
3943                            u8 alg_type, int node, struct hisi_qp **qps)
3944 {
3945         struct hisi_qm_resource *tmp;
3946         int ret = -ENODEV;
3947         LIST_HEAD(head);
3948         int i;
3949
3950         if (!qps || !qm_list || qp_num <= 0)
3951                 return -EINVAL;
3952
3953         mutex_lock(&qm_list->lock);
3954         if (hisi_qm_sort_devices(node, &head, qm_list)) {
3955                 mutex_unlock(&qm_list->lock);
3956                 goto err;
3957         }
3958
3959         list_for_each_entry(tmp, &head, list) {
3960                 for (i = 0; i < qp_num; i++) {
3961                         qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
3962                         if (IS_ERR(qps[i])) {
3963                                 hisi_qm_free_qps(qps, i);
3964                                 break;
3965                         }
3966                 }
3967
3968                 if (i == qp_num) {
3969                         ret = 0;
3970                         break;
3971                 }
3972         }
3973
3974         mutex_unlock(&qm_list->lock);
3975         if (ret)
3976                 pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n",
3977                         node, alg_type, qp_num);
3978
3979 err:
3980         free_list(&head);
3981         return ret;
3982 }
3983 EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
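
/*
 * A minimal usage sketch ("qm_list" is assumed to be the driver's
 * registered device list): allocate two qps near the caller's NUMA node
 * and release them with hisi_qm_free_qps() when done or on failure.
 *
 *	struct hisi_qp *qps[2] = {};
 *
 *	ret = hisi_qm_alloc_qps_node(qm_list, 2, 0, numa_node_id(), qps);
 *	if (ret)
 *		return ret;
 *	...
 *	hisi_qm_free_qps(qps, 2);
 */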
3984
3985 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
3986 {
3987         u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j;
3988         u32 max_qp_num = qm->max_qp_num;
3989         u32 q_base = qm->qp_num;
3990         int ret;
3991
3992         if (!num_vfs)
3993                 return -EINVAL;
3994
3995         vfs_q_num = qm->ctrl_qp_num - qm->qp_num;
3996
3997         /* If vfs_q_num is less than num_vfs, return error. */
3998         if (vfs_q_num < num_vfs)
3999                 return -EINVAL;
4000
4001         q_num = vfs_q_num / num_vfs;
4002         remain_q_num = vfs_q_num % num_vfs;
4003
4004         for (i = num_vfs; i > 0; i--) {
4005                 /*
4006                  * The last vf takes q_num + remain_q_num unless that would
4007                  * exceed max_qp_num; then the remainder is spread one per vf.
4008                  */
4009                 if (i == num_vfs && q_num + remain_q_num <= max_qp_num) {
4010                         act_q_num = q_num + remain_q_num;
4011                         remain_q_num = 0;
4012                 } else if (remain_q_num > 0) {
4013                         act_q_num = q_num + 1;
4014                         remain_q_num--;
4015                 } else {
4016                         act_q_num = q_num;
4017                 }
4018
4019                 act_q_num = min_t(int, act_q_num, max_qp_num);
4020                 ret = hisi_qm_set_vft(qm, i, q_base, act_q_num);
4021                 if (ret) {
4022                         for (j = num_vfs; j > i; j--)
4023                                 hisi_qm_set_vft(qm, j, 0, 0);
4024                         return ret;
4025                 }
4026                 q_base += act_q_num;
4027         }
4028
4029         return 0;
4030 }
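
/*
 * Worked example for the assignment above (illustrative numbers): with
 * vfs_q_num = 63 and num_vfs = 4, q_num = 15 and remain_q_num = 3. If
 * max_qp_num allows it, the last vf takes the whole remainder, giving
 * 18 + 15 + 15 + 15. If max_qp_num were 16, the remainder is instead
 * spread one queue at a time, giving 16 + 16 + 16 + 15. Either way all
 * 63 queues are handed out and no vf exceeds max_qp_num.
 */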
4031
4032 static int qm_clear_vft_config(struct hisi_qm *qm)
4033 {
4034         int ret;
4035         u32 i;
4036
4037         for (i = 1; i <= qm->vfs_num; i++) {
4038                 ret = hisi_qm_set_vft(qm, i, 0, 0);
4039                 if (ret)
4040                         return ret;
4041         }
4042         qm->vfs_num = 0;
4043
4044         return 0;
4045 }
4046
4047 static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
4048 {
4049         struct device *dev = &qm->pdev->dev;
4050         u32 ir = qos * QM_QOS_RATE;
4051         int ret, total_vfs, i;
4052
4053         total_vfs = pci_sriov_get_totalvfs(qm->pdev);
4054         if (fun_index > total_vfs)
4055                 return -EINVAL;
4056
4057         qm->factor[fun_index].func_qos = qos;
4058
4059         ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
4060         if (ret) {
4061                 dev_err(dev, "failed to calculate shaper parameter!\n");
4062                 return -EINVAL;
4063         }
4064
4065         for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
4066                 /* The same queue base number is reused for different alg types */
4067                 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
4068                 if (ret) {
4069                         dev_err(dev, "type: %d, failed to set shaper vft!\n", i);
4070                         return -EINVAL;
4071                 }
4072         }
4073
4074         return 0;
4075 }
4076
4077 static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
4078 {
4079         u64 cir_u = 0, cir_b = 0, cir_s = 0;
4080         u64 shaper_vft, ir_calc, ir;
4081         unsigned int val;
4082         u32 error_rate;
4083         int ret;
4084
4085         ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
4086                                          val & BIT(0), POLL_PERIOD,
4087                                          POLL_TIMEOUT);
4088         if (ret)
4089                 return 0;
4090
4091         writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
4092         writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE);
4093         writel(fun_index, qm->io_base + QM_VFT_CFG);
4094
4095         writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
4096         writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
4097
4098         ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
4099                                          val & BIT(0), POLL_PERIOD,
4100                                          POLL_TIMEOUT);
4101         if (ret)
4102                 return 0;
4103
4104         shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
4105                   ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32);
4106
4107         cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK;
4108         cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK;
4109         cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT;
4110
4111         cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK;
4112         cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT;
4113
4114         ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);
4115
4116         ir = qm->factor[fun_index].func_qos * QM_QOS_RATE;
4117
4118         error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
4119         if (error_rate > QM_QOS_MIN_ERROR_RATE) {
4120                 pci_err(qm->pdev, "error_rate: %u, failed to get function qos!\n", error_rate);
4121                 return 0;
4122         }
4123
4124         return ir;
4125 }
4126
4127 static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
4128 {
4129         struct device *dev = &qm->pdev->dev;
4130         u64 mb_cmd;
4131         u32 qos;
4132         int ret;
4133
4134         qos = qm_get_shaper_vft_qos(qm, fun_num);
4135         if (!qos) {
4136                 dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num);
4137                 return;
4138         }
4139
4140         mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT;
4141         ret = qm_ping_single_vf(qm, mb_cmd, fun_num);
4142         if (ret)
4143                 dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num);
4144 }
4145
4146 static int qm_vf_read_qos(struct hisi_qm *qm)
4147 {
4148         int cnt = 0;
4149         int ret = 0;
4150
4151         /* reset mailbox qos val */
4152         qm->mb_qos = 0;
4153
4154         /* vf ping pf to get function qos */
4155         if (qm->ops->ping_pf) {
4156                 ret = qm->ops->ping_pf(qm, QM_VF_GET_QOS);
4157                 if (ret) {
4158                         pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
4159                         return ret;
4160                 }
4161         }
4162
4163         while (true) {
4164                 msleep(QM_WAIT_DST_ACK);
4165                 if (qm->mb_qos)
4166                         break;
4167
4168                 if (++cnt > QM_MAX_VF_WAIT_COUNT) {
4169                         pci_err(qm->pdev, "PF ping VF timeout!\n");
4170                         return -ETIMEDOUT;
4171                 }
4172         }
4173
4174         return ret;
4175 }
4176
4177 static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
4178                                size_t count, loff_t *pos)
4179 {
4180         struct hisi_qm *qm = filp->private_data;
4181         char tbuf[QM_DBG_READ_LEN];
4182         u32 qos_val, ir;
4183         int ret;
4184
4185         ret = hisi_qm_get_dfx_access(qm);
4186         if (ret)
4187                 return ret;
4188
4189         /* Mailbox and reset cannot be operated at the same time */
4190         if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
4191                 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n");
4192                 ret = -EAGAIN;
4193                 goto err_put_dfx_access;
4194         }
4195
4196         if (qm->fun_type == QM_HW_PF) {
4197                 ir = qm_get_shaper_vft_qos(qm, 0);
4198         } else {
4199                 ret = qm_vf_read_qos(qm);
4200                 if (ret)
4201                         goto err_get_status;
4202                 ir = qm->mb_qos;
4203         }
4204
4205         qos_val = ir / QM_QOS_RATE;
4206         ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val);
4207
4208         ret = simple_read_from_buffer(buf, count, pos, tbuf, ret);
4209
4210 err_get_status:
4211         clear_bit(QM_RESETTING, &qm->misc_ctl);
4212 err_put_dfx_access:
4213         hisi_qm_put_dfx_access(qm);
4214         return ret;
4215 }
4216
4217 static ssize_t qm_qos_value_init(const char *buf, unsigned long *val)
4218 {
4219         int buflen = strlen(buf);
4220         int ret, i;
4221
4222         for (i = 0; i < buflen; i++) {
4223                 if (!isdigit(buf[i]))
4224                         return -EINVAL;
4225         }
4226
4227         ret = sscanf(buf, "%lu", val);
4228         if (ret != QM_QOS_VAL_NUM)
4229                 return -EINVAL;
4230
4231         return 0;
4232 }
4233
4234 static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
4235                                size_t count, loff_t *pos)
4236 {
4237         struct hisi_qm *qm = filp->private_data;
4238         char tbuf[QM_DBG_READ_LEN];
4239         int tmp1, bus, device, function;
4240         char tbuf_bdf[QM_DBG_READ_LEN] = {0};
4241         char val_buf[QM_QOS_VAL_MAX_LEN] = {0};
4242         unsigned int fun_index;
4243         unsigned long val = 0;
4244         int len, ret;
4245
4246         if (qm->fun_type == QM_HW_VF)
4247                 return -EINVAL;
4248
4249         /* Mailbox and reset cannot be operated at the same time */
4250         if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
4251                 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n");
4252                 return -EAGAIN;
4253         }
4254
4255         if (*pos != 0) {
4256                 ret = 0;
4257                 goto err_get_status;
4258         }
4259
4260         if (count >= QM_DBG_READ_LEN) {
4261                 ret = -ENOSPC;
4262                 goto err_get_status;
4263         }
4264
4265         len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count);
4266         if (len < 0) {
4267                 ret = len;
4268                 goto err_get_status;
4269         }
4270
4271         tbuf[len] = '\0';
4272         ret = sscanf(tbuf, "%s %s", tbuf_bdf, val_buf);
4273         if (ret != QM_QOS_PARAM_NUM) {
4274                 ret = -EINVAL;
4275                 goto err_get_status;
4276         }
4277
4278         ret = qm_qos_value_init(val_buf, &val);
4279         if (val == 0 || val > QM_QOS_MAX_VAL || ret) {
4280                 pci_err(qm->pdev, "input qos value is invalid, please set 1~1000!\n");
4281                 ret = -EINVAL;
4282                 goto err_get_status;
4283         }
4284
4285         ret = sscanf(tbuf_bdf, "%d:%x:%d.%d", &tmp1, &bus, &device, &function);
4286         if (ret != QM_QOS_BDF_PARAM_NUM) {
4287                 pci_err(qm->pdev, "input pci bdf value is invalid!\n");
4288                 ret = -EINVAL;
4289                 goto err_get_status;
4290         }
4291
4292         fun_index = device * 8 + function;
4293
4294         ret = qm_pm_get_sync(qm);
4295         if (ret) {
4296                 ret = -EINVAL;
4297                 goto err_get_status;
4298         }
4299
4300         ret = qm_func_shaper_enable(qm, fun_index, val);
4301         if (ret) {
4302                 pci_err(qm->pdev, "failed to enable function shaper!\n");
4303                 ret = -EINVAL;
4304                 goto err_put_sync;
4305         }
4306
4307         ret = count;
4308
4309 err_put_sync:
4310         qm_pm_put_sync(qm);
4311 err_get_status:
4312         clear_bit(QM_RESETTING, &qm->misc_ctl);
4313         return ret;
4314 }
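
/*
 * Usage sketch for the writer above (the debugfs path is illustrative and
 * depends on where the parent driver created qm->debug.debug_root): write
 * "<bdf> <qos>" from the PF to set a function's rate limit, e.g.
 *
 *	echo "0000:81:00.0 500" > /sys/kernel/debug/<driver>/<bdf>/alg_qos
 *
 * The BDF is parsed with sscanf("%d:%x:%d.%d") and fun_index is derived as
 * device * 8 + function (0 selects the PF itself), then the shaper is
 * programmed through qm_func_shaper_enable().
 */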
4315
4316 static const struct file_operations qm_algqos_fops = {
4317         .owner = THIS_MODULE,
4318         .open = simple_open,
4319         .read = qm_algqos_read,
4320         .write = qm_algqos_write,
4321 };
4322
4323 /**
4324  * hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
4325  * @qm: The qm for which we want to add debugfs files.
4326  *
4327  * Create function qos debugfs files.
4328  */
4329 static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
4330 {
4331         if (qm->fun_type == QM_HW_PF)
4332                 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
4333                                     qm, &qm_algqos_fops);
4334         else
4335                 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
4336                                     qm, &qm_algqos_fops);
4337 }
4338
4339 /**
4340  * hisi_qm_debug_init() - Initialize qm related debugfs files.
4341  * @qm: The qm for which we want to add debugfs files.
4342  *
4343  * Create qm related debugfs files.
4344  */
4345 void hisi_qm_debug_init(struct hisi_qm *qm)
4346 {
4347         struct qm_dfx *dfx = &qm->debug.dfx;
4348         struct dentry *qm_d;
4349         void *data;
4350         int i;
4351
4352         qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
4353         qm->debug.qm_d = qm_d;
4354
4355         /* only show this in PF */
4356         if (qm->fun_type == QM_HW_PF) {
4357                 qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
4358                 for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
4359                         qm_create_debugfs_file(qm, qm->debug.qm_d, i);
4360         }
4361
4362         debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
4363
4364         debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);
4365
4366         debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
4367                         &qm_status_fops);
4368         for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
4369                 data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
4370                 debugfs_create_file(qm_dfx_files[i].name,
4371                         0644,
4372                         qm_d,
4373                         data,
4374                         &qm_atomic64_ops);
4375         }
4376
4377         if (qm->ver >= QM_HW_V3)
4378                 hisi_qm_set_algqos_init(qm);
4379 }
4380 EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
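
/*
 * Resulting debugfs layout (a sketch; the root directory name depends on
 * the parent driver that owns qm->debug.debug_root):
 *
 *	<debug_root>/alg_qos			(QM_HW_V3 and later)
 *	<debug_root>/qm/regs
 *	<debug_root>/qm/cmd
 *	<debug_root>/qm/status
 *	<debug_root>/qm/<one file per qm_dfx_files[] entry>
 *
 * The CURRENT_QM file and the other per-queue debug files are created on
 * the PF only.
 */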
4381
4382 /**
4383  * hisi_qm_debug_regs_clear() - clear qm debug related registers.
4384  * @qm: The qm for which we want to clear its debug registers.
4385  */
4386 void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
4387 {
4388         const struct debugfs_reg32 *regs;
4389         int i;
4390
4391         /* clear current_qm */
4392         writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
4393         writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
4394
4395         /* clear current_q */
4396         writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
4397         writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
4398
4399         /*
4400          * these registers are read-clear, so they are cleared by
4401          * reading them.
4402          */
4403         writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
4404
4405         regs = qm_dfx_regs;
4406         for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
4407                 readl(qm->io_base + regs->offset);
4408                 regs++;
4409         }
4410
4411         /* clear clear_enable */
4412         writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
4413 }
4414 EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
4415
4416 /**
4417  * hisi_qm_sriov_enable() - enable virtual functions
4418  * @pdev: the PCIe device
4419  * @max_vfs: the number of virtual functions to enable
4420  *
4421  * Returns the number of enabled VFs. If VFs are already enabled or if
4422  * max_vfs is more than the total number the device can enable, returns
4423  * failure.
4424  */
4425 int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
4426 {
4427         struct hisi_qm *qm = pci_get_drvdata(pdev);
4428         int pre_existing_vfs, num_vfs, total_vfs, ret;
4429
4430         ret = qm_pm_get_sync(qm);
4431         if (ret)
4432                 return ret;
4433
4434         total_vfs = pci_sriov_get_totalvfs(pdev);
4435         pre_existing_vfs = pci_num_vf(pdev);
4436         if (pre_existing_vfs) {
4437                 pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
4438                         pre_existing_vfs);
4439                 ret = -EBUSY;
4440                 goto err_put_sync;
4441         }
4441
4442         num_vfs = min_t(int, max_vfs, total_vfs);
4443         ret = qm_vf_q_assign(qm, num_vfs);
4444         if (ret) {
4445                 pci_err(pdev, "Can't assign queues for VF!\n");
4446                 goto err_put_sync;
4447         }
4448
4449         qm->vfs_num = num_vfs;
4450
4451         ret = pci_enable_sriov(pdev, num_vfs);
4452         if (ret) {
4453                 pci_err(pdev, "Can't enable VF!\n");
4454                 qm_clear_vft_config(qm);
4455                 goto err_put_sync;
4456         }
4457
4458         pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
4459
4460         return num_vfs;
4461
4462 err_put_sync:
4463         qm_pm_put_sync(qm);
4464         return ret;
4465 }
4466 EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
4467
4468 /**
4469  * hisi_qm_sriov_disable - disable virtual functions
4470  * @pdev: the PCI device.
4471  * @is_frozen: true when all the VFs are frozen.
4472  *
4473  * Return failure if there are VFs assigned already or a VF is in use.
4474  */
4475 int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
4476 {
4477         struct hisi_qm *qm = pci_get_drvdata(pdev);
4478         int total_vfs = pci_sriov_get_totalvfs(qm->pdev);
4479         int ret;
4480
4481         if (pci_vfs_assigned(pdev)) {
4482                 pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
4483                 return -EPERM;
4484         }
4485
4486         /* While a VF is in use, SRIOV cannot be disabled. */
4487         if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
4488                 pci_err(pdev, "Task is using its VF!\n");
4489                 return -EBUSY;
4490         }
4491
4492         pci_disable_sriov(pdev);
4493         /* clear vf function shaper configure array */
4494         memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs);
4495         ret = qm_clear_vft_config(qm);
4496         if (ret)
4497                 return ret;
4498
4499         qm_pm_put_sync(qm);
4500
4501         return 0;
4502 }
4503 EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
4504
4505 /**
4506  * hisi_qm_sriov_configure - configure the number of VFs
4507  * @pdev: The PCI device
4508  * @num_vfs: The number of VFs need enabled
4509  *
4510  * Enable SR-IOV according to num_vfs, 0 means disable.
4511  */
4512 int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
4513 {
4514         if (num_vfs == 0)
4515                 return hisi_qm_sriov_disable(pdev, false);
4516         else
4517                 return hisi_qm_sriov_enable(pdev, num_vfs);
4518 }
4519 EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
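
/*
 * A minimal wiring sketch (hypothetical driver and symbol names): this
 * helper is meant to back a pci_driver's .sriov_configure hook, which lets
 * VFs be toggled from sysfs:
 *
 *	echo 3 > /sys/bus/pci/devices/<bdf>/sriov_numvfs
 *
 *	static struct pci_driver xxx_pci_driver = {
 *		.name		 = "hisi_xxx",
 *		.id_table	 = xxx_dev_ids,
 *		.probe		 = xxx_probe,
 *		.remove		 = xxx_remove,
 *		.sriov_configure = hisi_qm_sriov_configure,
 *	};
 */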
4520
4521 static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
4522 {
4523         u32 err_sts;
4524
4525         if (!qm->err_ini->get_dev_hw_err_status) {
4526                 dev_err(&qm->pdev->dev, "Device doesn't support getting hw error status!\n");
4527                 return ACC_ERR_NONE;
4528         }
4529
4530         /* get device hardware error status */
4531         err_sts = qm->err_ini->get_dev_hw_err_status(qm);
4532         if (err_sts) {
4533                 if (err_sts & qm->err_info.ecc_2bits_mask)
4534                         qm->err_status.is_dev_ecc_mbit = true;
4535
4536                 if (qm->err_ini->log_dev_hw_err)
4537                         qm->err_ini->log_dev_hw_err(qm, err_sts);
4538
4539                 /* ce error does not need to be reset */
4540                 if ((err_sts | qm->err_info.dev_ce_mask) ==
4541                      qm->err_info.dev_ce_mask) {
4542                         if (qm->err_ini->clear_dev_hw_err_status)
4543                                 qm->err_ini->clear_dev_hw_err_status(qm,
4544                                                                 err_sts);
4545
4546                         return ACC_ERR_RECOVERED;
4547                 }
4548
4549                 return ACC_ERR_NEED_RESET;
4550         }
4551
4552         return ACC_ERR_RECOVERED;
4553 }
4554
4555 static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
4556 {
4557         enum acc_err_result qm_ret, dev_ret;
4558
4559         /* log qm error */
4560         qm_ret = qm_hw_error_handle(qm);
4561
4562         /* log device error */
4563         dev_ret = qm_dev_err_handle(qm);
4564
4565         return (qm_ret == ACC_ERR_NEED_RESET ||
4566                 dev_ret == ACC_ERR_NEED_RESET) ?
4567                 ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
4568 }
4569
4570 /**
4571  * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
4572  * @pdev: The PCI device which needs to report the error.
4573  * @state: The connectivity between CPU and device.
4574  *
4575  * We register this function into the PCIe AER handlers. It will report
4576  * device or qm hardware error status when an error occurs.
4577  */
4578 pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
4579                                           pci_channel_state_t state)
4580 {
4581         struct hisi_qm *qm = pci_get_drvdata(pdev);
4582         enum acc_err_result ret;
4583
4584         if (pdev->is_virtfn)
4585                 return PCI_ERS_RESULT_NONE;
4586
4587         pci_info(pdev, "PCI error detected, state(=%u)!!\n", state);
4588         if (state == pci_channel_io_perm_failure)
4589                 return PCI_ERS_RESULT_DISCONNECT;
4590
4591         ret = qm_process_dev_error(qm);
4592         if (ret == ACC_ERR_NEED_RESET)
4593                 return PCI_ERS_RESULT_NEED_RESET;
4594
4595         return PCI_ERS_RESULT_RECOVERED;
4596 }
4597 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
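
/*
 * Illustrative wiring (hypothetical driver name): the callback above is
 * normally installed in a pci_error_handlers table together with the
 * other reset hooks exported by this file.
 *
 *	static const struct pci_error_handlers xxx_err_handler = {
 *		.error_detected	= hisi_qm_dev_err_detected,
 *		.slot_reset	= hisi_qm_dev_slot_reset,
 *		.reset_prepare	= hisi_qm_reset_prepare,
 *		.reset_done	= hisi_qm_reset_done,
 *	};
 */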
4598
4599 static int qm_check_req_recv(struct hisi_qm *qm)
4600 {
4601         struct pci_dev *pdev = qm->pdev;
4602         int ret;
4603         u32 val;
4604
4605         if (qm->ver >= QM_HW_V3)
4606                 return 0;
4607
4608         writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
4609         ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
4610                                          (val == ACC_VENDOR_ID_VALUE),
4611                                          POLL_PERIOD, POLL_TIMEOUT);
4612         if (ret) {
4613                 dev_err(&pdev->dev, "Failed to read QM reg!\n");
4614                 return ret;
4615         }
4616
4617         writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
4618         ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
4619                                          (val == PCI_VENDOR_ID_HUAWEI),
4620                                          POLL_PERIOD, POLL_TIMEOUT);
4621         if (ret)
4622                 dev_err(&pdev->dev, "Failed to read QM reg a second time!\n");
4623
4624         return ret;
4625 }
4626
4627 static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
4628 {
4629         struct pci_dev *pdev = qm->pdev;
4630         u16 cmd;
4631         int i;
4632
4633         pci_read_config_word(pdev, PCI_COMMAND, &cmd);
4634         if (set)
4635                 cmd |= PCI_COMMAND_MEMORY;
4636         else
4637                 cmd &= ~PCI_COMMAND_MEMORY;
4638
4639         pci_write_config_word(pdev, PCI_COMMAND, cmd);
4640         for (i = 0; i < MAX_WAIT_COUNTS; i++) {
4641                 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
4642                 if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1))
4643                         return 0;
4644
4645                 udelay(1);
4646         }
4647
4648         return -ETIMEDOUT;
4649 }
4650
4651 static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
4652 {
4653         struct pci_dev *pdev = qm->pdev;
4654         u16 sriov_ctrl;
4655         int pos;
4656         int i;
4657
4658         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4659         pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
4660         if (set)
4661                 sriov_ctrl |= PCI_SRIOV_CTRL_MSE;
4662         else
4663                 sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE;
4664         pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);
4665
4666         for (i = 0; i < MAX_WAIT_COUNTS; i++) {
4667                 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
4668                 if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
4669                     ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT)
4670                         return 0;
4671
4672                 udelay(1);
4673         }
4674
4675         return -ETIMEDOUT;
4676 }
4677
4678 static int qm_vf_reset_prepare(struct hisi_qm *qm,
4679                                enum qm_stop_reason stop_reason)
4680 {
4681         struct hisi_qm_list *qm_list = qm->qm_list;
4682         struct pci_dev *pdev = qm->pdev;
4683         struct pci_dev *virtfn;
4684         struct hisi_qm *vf_qm;
4685         int ret = 0;
4686
4687         mutex_lock(&qm_list->lock);
4688         list_for_each_entry(vf_qm, &qm_list->list, list) {
4689                 virtfn = vf_qm->pdev;
4690                 if (virtfn == pdev)
4691                         continue;
4692
4693                 if (pci_physfn(virtfn) == pdev) {
4694                         /* save VFs PCIE BAR configuration */
4695                         pci_save_state(virtfn);
4696
4697                         ret = hisi_qm_stop(vf_qm, stop_reason);
4698                         if (ret)
4699                                 goto stop_fail;
4700                 }
4701         }
4702
4703 stop_fail:
4704         mutex_unlock(&qm_list->lock);
4705         return ret;
4706 }
4707
4708 static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
4709                            enum qm_stop_reason stop_reason)
4710 {
4711         struct pci_dev *pdev = qm->pdev;
4712         int ret;
4713
4714         if (!qm->vfs_num)
4715                 return 0;
4716
4717         /* Kunpeng930 supports notifying VFs to stop before PF reset */
4718         if (qm->ops->ping_all_vfs) {
4719                 ret = qm->ops->ping_all_vfs(qm, cmd);
4720                 if (ret)
4721                         pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
4722         } else {
4723                 ret = qm_vf_reset_prepare(qm, stop_reason);
4724                 if (ret)
4725                         pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret);
4726         }
4727
4728         return ret;
4729 }
4730
4731 static int qm_wait_reset_finish(struct hisi_qm *qm)
4732 {
4733         int delay = 0;
4734
4735         /* All reset requests need to be queued for processing */
4736         while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
4737                 msleep(++delay);
4738                 if (delay > QM_RESET_WAIT_TIMEOUT)
4739                         return -EBUSY;
4740         }
4741
4742         return 0;
4743 }
4744
4745 static int qm_reset_prepare_ready(struct hisi_qm *qm)
4746 {
4747         struct pci_dev *pdev = qm->pdev;
4748         struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
4749
4750         /*
4751          * PF and VF on the host do not support resetting at the
4752          * same time on Kunpeng920.
4753          */
4754         if (qm->ver < QM_HW_V3)
4755                 return qm_wait_reset_finish(pf_qm);
4756
4757         return qm_wait_reset_finish(qm);
4758 }
4759
4760 static void qm_reset_bit_clear(struct hisi_qm *qm)
4761 {
4762         struct pci_dev *pdev = qm->pdev;
4763         struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
4764
4765         if (qm->ver < QM_HW_V3)
4766                 clear_bit(QM_RESETTING, &pf_qm->misc_ctl);
4767
4768         clear_bit(QM_RESETTING, &qm->misc_ctl);
4769 }
4770
4771 static int qm_controller_reset_prepare(struct hisi_qm *qm)
4772 {
4773         struct pci_dev *pdev = qm->pdev;
4774         int ret;
4775
4776         ret = qm_reset_prepare_ready(qm);
4777         if (ret) {
4778                 pci_err(pdev, "Controller reset not ready!\n");
4779                 return ret;
4780         }
4781
4782         /* PF obtains the information of VF by querying the register. */
4783         qm_cmd_uninit(qm);
4784
4785         /* Whether or not the VFs stop successfully, the soft reset continues. */
4786         ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET);
4787         if (ret)
4788                 pci_err(pdev, "failed to stop vfs by pf in soft reset.\n");
4789
4790         ret = hisi_qm_stop(qm, QM_SOFT_RESET);
4791         if (ret) {
4792                 pci_err(pdev, "Failed to stop QM!\n");
4793                 qm_reset_bit_clear(qm);
4794                 return ret;
4795         }
4796
4797         ret = qm_wait_vf_prepare_finish(qm);
4798         if (ret)
4799                 pci_err(pdev, "failed to wait for VFs to stop in soft reset!\n");
4800
4801         clear_bit(QM_RST_SCHED, &qm->misc_ctl);
4802
4803         return 0;
4804 }
4805
4806 static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
4807 {
4808         u32 nfe_enb = 0;
4809
4810         /* Kunpeng930 hardware automatically closes master ooo when an NFE occurs */
4811         if (qm->ver >= QM_HW_V3)
4812                 return;
4813
4814         if (!qm->err_status.is_dev_ecc_mbit &&
4815             qm->err_status.is_qm_ecc_mbit &&
4816             qm->err_ini->close_axi_master_ooo) {
4817
4818                 qm->err_ini->close_axi_master_ooo(qm);
4819
4820         } else if (qm->err_status.is_dev_ecc_mbit &&
4821                    !qm->err_status.is_qm_ecc_mbit &&
4822                    !qm->err_ini->close_axi_master_ooo) {
4823
4824                 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
4825                 writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
4826                        qm->io_base + QM_RAS_NFE_ENABLE);
4827                 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
4828         }
4829 }
4830
4831 static int qm_soft_reset(struct hisi_qm *qm)
4832 {
4833         struct pci_dev *pdev = qm->pdev;
4834         int ret;
4835         u32 val;
4836
4837         /* Ensure all doorbells and mailboxes have been received by the QM */
4838         ret = qm_check_req_recv(qm);
4839         if (ret)
4840                 return ret;
4841
4842         if (qm->vfs_num) {
4843                 ret = qm_set_vf_mse(qm, false);
4844                 if (ret) {
4845                         pci_err(pdev, "Fails to disable vf MSE bit.\n");
4846                         return ret;
4847                 }
4848         }
4849
4850         ret = qm->ops->set_msi(qm, false);
4851         if (ret) {
4852                 pci_err(pdev, "Fails to disable PEH MSI bit.\n");
4853                 return ret;
4854         }
4855
4856         qm_dev_ecc_mbit_handle(qm);
4857
4858         /* OOO register set and check */
4859         writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
4860                qm->io_base + ACC_MASTER_GLOBAL_CTRL);
4861
4862         /* If the bus is locked, the chip must be reset */
4863         ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
4864                                          val,
4865                                          (val == ACC_MASTER_TRANS_RETURN_RW),
4866                                          POLL_PERIOD, POLL_TIMEOUT);
4867         if (ret) {
4868                 pci_emerg(pdev, "Bus lock! Please reset system.\n");
4869                 return ret;
4870         }
4871
4872         if (qm->err_ini->close_sva_prefetch)
4873                 qm->err_ini->close_sva_prefetch(qm);
4874
4875         ret = qm_set_pf_mse(qm, false);
4876         if (ret) {
4877                 pci_err(pdev, "Fails to disable pf MSE bit.\n");
4878                 return ret;
4879         }
4880
4881         /* The reset related sub-control registers are not in PCI BAR */
4882         if (ACPI_HANDLE(&pdev->dev)) {
4883                 unsigned long long value = 0;
4884                 acpi_status s;
4885
4886                 s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
4887                                           qm->err_info.acpi_rst,
4888                                           NULL, &value);
4889                 if (ACPI_FAILURE(s)) {
4890                         pci_err(pdev, "NO controller reset method!\n");
4891                         return -EIO;
4892                 }
4893
4894                 if (value) {
4895                         pci_err(pdev, "Reset step %llu failed!\n", value);
4896                         return -EIO;
4897                 }
4898         } else {
4899                 pci_err(pdev, "No reset method!\n");
4900                 return -EINVAL;
4901         }
4902
4903         return 0;
4904 }
4905
4906 static int qm_vf_reset_done(struct hisi_qm *qm)
4907 {
4908         struct hisi_qm_list *qm_list = qm->qm_list;
4909         struct pci_dev *pdev = qm->pdev;
4910         struct pci_dev *virtfn;
4911         struct hisi_qm *vf_qm;
4912         int ret = 0;
4913
4914         mutex_lock(&qm_list->lock);
4915         list_for_each_entry(vf_qm, &qm_list->list, list) {
4916                 virtfn = vf_qm->pdev;
4917                 if (virtfn == pdev)
4918                         continue;
4919
4920                 if (pci_physfn(virtfn) == pdev) {
4921                         /* enable the VF's PCIe BAR configuration */
4922                         pci_restore_state(virtfn);
4923
4924                         ret = qm_restart(vf_qm);
4925                         if (ret)
4926                                 goto restart_fail;
4927                 }
4928         }
4929
4930 restart_fail:
4931         mutex_unlock(&qm_list->lock);
4932         return ret;
4933 }
4934
4935 static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
4936 {
4937         struct pci_dev *pdev = qm->pdev;
4938         int ret;
4939
4940         if (!qm->vfs_num)
4941                 return 0;
4942
4943         ret = qm_vf_q_assign(qm, qm->vfs_num);
4944         if (ret) {
4945                 pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret);
4946                 return ret;
4947         }
4948
4949         /* Kunpeng930 supports notifying VFs to start after the PF reset. */
4950         if (qm->ops->ping_all_vfs) {
4951                 ret = qm->ops->ping_all_vfs(qm, cmd);
4952                 if (ret)
4953                         pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n");
4954         } else {
4955                 ret = qm_vf_reset_done(qm);
4956                 if (ret)
4957                         pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret);
4958         }
4959
4960         return ret;
4961 }
4962
4963 static int qm_dev_hw_init(struct hisi_qm *qm)
4964 {
4965         return qm->err_ini->hw_init(qm);
4966 }
4967
4968 static void qm_restart_prepare(struct hisi_qm *qm)
4969 {
4970         u32 value;
4971
4972         if (qm->err_ini->open_sva_prefetch)
4973                 qm->err_ini->open_sva_prefetch(qm);
4974
4975         if (qm->ver >= QM_HW_V3)
4976                 return;
4977
4978         if (!qm->err_status.is_qm_ecc_mbit &&
4979             !qm->err_status.is_dev_ecc_mbit)
4980                 return;
4981
4982         /* temporarily close the OOO port used for PEH to write out MSI */
4983         value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
4984         writel(value & ~qm->err_info.msi_wr_port,
4985                qm->io_base + ACC_AM_CFG_PORT_WR_EN);
4986
4987         /* clear the device ECC 2-bit error source, if any */
4988         value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
4989         if (value && qm->err_ini->clear_dev_hw_err_status)
4990                 qm->err_ini->clear_dev_hw_err_status(qm, value);
4991
4992         /* clear QM ecc mbit error source */
4993         writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
4994
4995         /* clear AM Reorder Buffer ecc mbit source */
4996         writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
4997 }
4998
4999 static void qm_restart_done(struct hisi_qm *qm)
5000 {
5001         u32 value;
5002
5003         if (qm->ver >= QM_HW_V3)
5004                 goto clear_flags;
5005
5006         if (!qm->err_status.is_qm_ecc_mbit &&
5007             !qm->err_status.is_dev_ecc_mbit)
5008                 return;
5009
5010         /* open the OOO port for PEH to write out MSI */
5011         value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
5012         value |= qm->err_info.msi_wr_port;
5013         writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
5014
5015 clear_flags:
5016         qm->err_status.is_qm_ecc_mbit = false;
5017         qm->err_status.is_dev_ecc_mbit = false;
5018 }
5019
5020 static int qm_controller_reset_done(struct hisi_qm *qm)
5021 {
5022         struct pci_dev *pdev = qm->pdev;
5023         int ret;
5024
5025         ret = qm->ops->set_msi(qm, true);
5026         if (ret) {
5027                 pci_err(pdev, "Fails to enable PEH MSI bit!\n");
5028                 return ret;
5029         }
5030
5031         ret = qm_set_pf_mse(qm, true);
5032         if (ret) {
5033                 pci_err(pdev, "Fails to enable pf MSE bit!\n");
5034                 return ret;
5035         }
5036
5037         if (qm->vfs_num) {
5038                 ret = qm_set_vf_mse(qm, true);
5039                 if (ret) {
5040                         pci_err(pdev, "Fails to enable vf MSE bit!\n");
5041                         return ret;
5042                 }
5043         }
5044
5045         ret = qm_dev_hw_init(qm);
5046         if (ret) {
5047                 pci_err(pdev, "Failed to init device\n");
5048                 return ret;
5049         }
5050
5051         qm_restart_prepare(qm);
5052         hisi_qm_dev_err_init(qm);
5053         if (qm->err_ini->open_axi_master_ooo)
5054                 qm->err_ini->open_axi_master_ooo(qm);
5055
5056         ret = qm_restart(qm);
5057         if (ret) {
5058                 pci_err(pdev, "Failed to start QM!\n");
5059                 return ret;
5060         }
5061
5062         ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
5063         if (ret)
5064                 pci_err(pdev, "failed to start vfs by pf in soft reset.\n");
5065
5066         ret = qm_wait_vf_prepare_finish(qm);
5067         if (ret)
5068                 pci_err(pdev, "failed to wait for VFs to start in soft reset!\n");
5069
5070         qm_cmd_init(qm);
5071         qm_restart_done(qm);
5072
5073         qm_reset_bit_clear(qm);
5074
5075         return 0;
5076 }
5077
5078 static int qm_controller_reset(struct hisi_qm *qm)
5079 {
5080         struct pci_dev *pdev = qm->pdev;
5081         int ret;
5082
5083         pci_info(pdev, "Controller resetting...\n");
5084
5085         ret = qm_controller_reset_prepare(qm);
5086         if (ret) {
5087                 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
5088                 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
5089                 clear_bit(QM_RST_SCHED, &qm->misc_ctl);
5090                 return ret;
5091         }
5092
5093         ret = qm_soft_reset(qm);
5094         if (ret) {
5095                 pci_err(pdev, "Controller reset failed (%d)\n", ret);
5096                 qm_reset_bit_clear(qm);
5097                 return ret;
5098         }
5099
5100         ret = qm_controller_reset_done(qm);
5101         if (ret) {
5102                 qm_reset_bit_clear(qm);
5103                 return ret;
5104         }
5105
5106         pci_info(pdev, "Controller reset complete\n");
5107
5108         return 0;
5109 }
5110
5111 /**
5112  * hisi_qm_dev_slot_reset() - slot reset
5113  * @pdev: the PCIe device
5114  *
5115  * This function offers the QM-related PCIe device reset interface. Drivers that
5116  * use the QM can use this function as slot_reset in their struct pci_error_handlers.
5117  */
5118 pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
5119 {
5120         struct hisi_qm *qm = pci_get_drvdata(pdev);
5121         int ret;
5122
5123         if (pdev->is_virtfn)
5124                 return PCI_ERS_RESULT_RECOVERED;
5125
5126         pci_aer_clear_nonfatal_status(pdev);
5127
5128         /* reset pcie device controller */
5129         ret = qm_controller_reset(qm);
5130         if (ret) {
5131                 pci_err(pdev, "Controller reset failed (%d)\n", ret);
5132                 return PCI_ERS_RESULT_DISCONNECT;
5133         }
5134
5135         return PCI_ERS_RESULT_RECOVERED;
5136 }
5137 EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);
5138
5139 void hisi_qm_reset_prepare(struct pci_dev *pdev)
5140 {
5141         struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
5142         struct hisi_qm *qm = pci_get_drvdata(pdev);
5143         u32 delay = 0;
5144         int ret;
5145
5146         hisi_qm_dev_err_uninit(pf_qm);
5147
5148         /*
5149          * Check whether there is an ECC mbit error. If there is, wait
5150          * for the soft reset to fix it.
5151          */
5152         while (qm_check_dev_error(pf_qm)) {
5153                 msleep(++delay);
5154                 if (delay > QM_RESET_WAIT_TIMEOUT)
5155                         return;
5156         }
5157
5158         ret = qm_reset_prepare_ready(qm);
5159         if (ret) {
5160                 pci_err(pdev, "FLR not ready!\n");
5161                 return;
5162         }
5163
5164         /* PF obtains the information of VF by querying the register. */
5165         if (qm->fun_type == QM_HW_PF)
5166                 qm_cmd_uninit(qm);
5167
5168         ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_FLR);
5169         if (ret)
5170                 pci_err(pdev, "failed to stop vfs by pf in FLR.\n");
5171
5172         ret = hisi_qm_stop(qm, QM_FLR);
5173         if (ret) {
5174                 pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
5175                 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
5176                 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
5177                 return;
5178         }
5179
5180         ret = qm_wait_vf_prepare_finish(qm);
5181         if (ret)
5182                 pci_err(pdev, "failed to wait for VFs to stop in FLR!\n");
5183
5184         pci_info(pdev, "FLR resetting...\n");
5185 }
5186 EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
5187
5188 static bool qm_flr_reset_complete(struct pci_dev *pdev)
5189 {
5190         struct pci_dev *pf_pdev = pci_physfn(pdev);
5191         struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
5192         u32 id;
5193
5194         pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
5195         if (id == QM_PCI_COMMAND_INVALID) {
5196                 pci_err(pdev, "Device can not be used!\n");
5197                 return false;
5198         }
5199
5200         return true;
5201 }
5202
5203 void hisi_qm_reset_done(struct pci_dev *pdev)
5204 {
5205         struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
5206         struct hisi_qm *qm = pci_get_drvdata(pdev);
5207         int ret;
5208
5209         if (qm->fun_type == QM_HW_PF) {
5210                 ret = qm_dev_hw_init(qm);
5211                 if (ret) {
5212                         pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
5213                         goto flr_done;
5214                 }
5215         }
5216
5217         hisi_qm_dev_err_init(pf_qm);
5218
5219         ret = qm_restart(qm);
5220         if (ret) {
5221                 pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
5222                 goto flr_done;
5223         }
5224
5225         ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
5226         if (ret)
5227                 pci_err(pdev, "failed to start vfs by pf in FLR.\n");
5228
5229         ret = qm_wait_vf_prepare_finish(qm);
5230         if (ret)
5231                 pci_err(pdev, "failed to wait for VFs to start in FLR!\n");
5232
5233 flr_done:
5234         if (qm->fun_type == QM_HW_PF)
5235                 qm_cmd_init(qm);
5236
5237         if (qm_flr_reset_complete(pdev))
5238                 pci_info(pdev, "FLR reset complete\n");
5239
5240         qm_reset_bit_clear(qm);
5241 }
5242 EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
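
/*
 * Example (illustrative sketch, not part of this file's logic): a QM-based
 * accelerator driver typically wires the reset hooks above into its
 * struct pci_error_handlers. "hisi_xxx" is a placeholder name;
 * hisi_qm_dev_err_detected() is the matching exported error_detected
 * helper.
 *
 *	static const struct pci_error_handlers hisi_xxx_err_handler = {
 *		.error_detected	= hisi_qm_dev_err_detected,
 *		.slot_reset	= hisi_qm_dev_slot_reset,
 *		.reset_prepare	= hisi_qm_reset_prepare,
 *		.reset_done	= hisi_qm_reset_done,
 *	};
 */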
5243
5244 static irqreturn_t qm_abnormal_irq(int irq, void *data)
5245 {
5246         struct hisi_qm *qm = data;
5247         enum acc_err_result ret;
5248
5249         atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
5250         ret = qm_process_dev_error(qm);
5251         if (ret == ACC_ERR_NEED_RESET &&
5252             !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) &&
5253             !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl))
5254                 schedule_work(&qm->rst_work);
5255
5256         return IRQ_HANDLED;
5257 }
5258
5259 static int qm_irq_register(struct hisi_qm *qm)
5260 {
5261         struct pci_dev *pdev = qm->pdev;
5262         int ret;
5263
5264         ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
5265                           qm_irq, 0, qm->dev_name, qm);
5266         if (ret)
5267                 return ret;
5268
5269         if (qm->ver > QM_HW_V1) {
5270                 ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
5271                                   qm_aeq_irq, 0, qm->dev_name, qm);
5272                 if (ret)
5273                         goto err_aeq_irq;
5274
5275                 if (qm->fun_type == QM_HW_PF) {
5276                         ret = request_irq(pci_irq_vector(pdev,
5277                                           QM_ABNORMAL_EVENT_IRQ_VECTOR),
5278                                           qm_abnormal_irq, 0, qm->dev_name, qm);
5279                         if (ret)
5280                                 goto err_abnormal_irq;
5281                 }
5282         }
5283
5284         if (qm->ver > QM_HW_V2) {
5285                 ret = request_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR),
5286                                 qm_mb_cmd_irq, 0, qm->dev_name, qm);
5287                 if (ret)
5288                         goto err_mb_cmd_irq;
5289         }
5290
5291         return 0;
5292
5293 err_mb_cmd_irq:
5294         if (qm->fun_type == QM_HW_PF)
5295                 free_irq(pci_irq_vector(pdev, QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
5296 err_abnormal_irq:
5297         free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
5298 err_aeq_irq:
5299         free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
5300         return ret;
5301 }
5302
5303 /**
5304  * hisi_qm_dev_shutdown() - Shut down the device.
5305  * @pdev: The device to be shut down.
5306  *
5307  * This function stops the qm when the OS shuts down or reboots.
5308  */
5309 void hisi_qm_dev_shutdown(struct pci_dev *pdev)
5310 {
5311         struct hisi_qm *qm = pci_get_drvdata(pdev);
5312         int ret;
5313
5314         ret = hisi_qm_stop(qm, QM_NORMAL);
5315         if (ret)
5316                 dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
5317 }
5318 EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
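
/*
 * Example (illustrative sketch): drivers normally install this as the
 * .shutdown callback of their struct pci_driver; all other names below
 * are placeholders.
 *
 *	static struct pci_driver hisi_xxx_pci_driver = {
 *		.name		= "hisi_xxx",
 *		.id_table	= hisi_xxx_dev_ids,
 *		.probe		= hisi_xxx_probe,
 *		.remove		= hisi_xxx_remove,
 *		.shutdown	= hisi_qm_dev_shutdown,
 *	};
 */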
5319
5320 static void hisi_qm_controller_reset(struct work_struct *rst_work)
5321 {
5322         struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
5323         int ret;
5324
5325         ret = qm_pm_get_sync(qm);
5326         if (ret) {
5327                 clear_bit(QM_RST_SCHED, &qm->misc_ctl);
5328                 return;
5329         }
5330
5331         /* reset pcie device controller */
5332         ret = qm_controller_reset(qm);
5333         if (ret)
5334                 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
5335
5336         qm_pm_put_sync(qm);
5337 }
5338
5339 static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
5340                                    enum qm_stop_reason stop_reason)
5341 {
5342         enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE;
5343         struct pci_dev *pdev = qm->pdev;
5344         int ret;
5345
5346         ret = qm_reset_prepare_ready(qm);
5347         if (ret) {
5348                 dev_err(&pdev->dev, "reset prepare not ready!\n");
5349                 atomic_set(&qm->status.flags, QM_STOP);
5350                 cmd = QM_VF_PREPARE_FAIL;
5351                 goto err_prepare;
5352         }
5353
5354         ret = hisi_qm_stop(qm, stop_reason);
5355         if (ret) {
5356                 dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret);
5357                 atomic_set(&qm->status.flags, QM_STOP);
5358                 cmd = QM_VF_PREPARE_FAIL;
5359                 goto err_prepare;
5360         } else {
5361                 goto out;
5362         }
5363
5364 err_prepare:
5365         hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
5366         hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
5367 out:
5368         pci_save_state(pdev);
5369         ret = qm->ops->ping_pf(qm, cmd);
5370         if (ret)
5371                 dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n");
5372 }
5373
5374 static void qm_pf_reset_vf_done(struct hisi_qm *qm)
5375 {
5376         enum qm_mb_cmd cmd = QM_VF_START_DONE;
5377         struct pci_dev *pdev = qm->pdev;
5378         int ret;
5379
5380         pci_restore_state(pdev);
5381         ret = hisi_qm_start(qm);
5382         if (ret) {
5383                 dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret);
5384                 cmd = QM_VF_START_FAIL;
5385         }
5386
5387         ret = qm->ops->ping_pf(qm, cmd);
5388         if (ret)
5389                 dev_warn(&pdev->dev, "PF responds timeout in reset done!\n");
5390
5391         qm_reset_bit_clear(qm);
5392 }
5393
5394 static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
5395 {
5396         struct device *dev = &qm->pdev->dev;
5397         u32 val, cmd;
5398         u64 msg;
5399         int ret;
5400
5401         /* Wait for reset to finish */
5402         ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val,
5403                                          val == BIT(0), QM_VF_RESET_WAIT_US,
5404                                          QM_VF_RESET_WAIT_TIMEOUT_US);
5405         /* hardware completion status should be available by this time */
5406         if (ret) {
5407                 dev_err(dev, "couldn't get reset done status from PF, timeout!\n");
5408                 return -ETIMEDOUT;
5409         }
5410
5411         /*
5412          * Whether or not the message is received successfully, the VF
5413          * needs to ack the PF by clearing the interrupt.
5414          */
5415         ret = qm_get_mb_cmd(qm, &msg, 0);
5416         qm_clear_cmd_interrupt(qm, 0);
5417         if (ret) {
5418                 dev_err(dev, "failed to get msg from PF in reset done!\n");
5419                 return ret;
5420         }
5421
5422         cmd = msg & QM_MB_CMD_DATA_MASK;
5423         if (cmd != QM_PF_RESET_DONE) {
5424                 dev_err(dev, "the cmd(%u) is not reset done!\n", cmd);
5425                 ret = -EINVAL;
5426         }
5427
5428         return ret;
5429 }
5430
5431 static void qm_pf_reset_vf_process(struct hisi_qm *qm,
5432                                    enum qm_stop_reason stop_reason)
5433 {
5434         struct device *dev = &qm->pdev->dev;
5435         int ret;
5436
5437         dev_info(dev, "device reset start...\n");
5438
5439         /* The message is obtained by querying the register during resetting */
5440         qm_cmd_uninit(qm);
5441         qm_pf_reset_vf_prepare(qm, stop_reason);
5442
5443         ret = qm_wait_pf_reset_finish(qm);
5444         if (ret)
5445                 goto err_get_status;
5446
5447         qm_pf_reset_vf_done(qm);
5448         qm_cmd_init(qm);
5449
5450         dev_info(dev, "device reset done.\n");
5451
5452         return;
5453
5454 err_get_status:
5455         qm_cmd_init(qm);
5456         qm_reset_bit_clear(qm);
5457 }
5458
5459 static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
5460 {
5461         struct device *dev = &qm->pdev->dev;
5462         u64 msg;
5463         u32 cmd;
5464         int ret;
5465
5466         /*
5467          * Get the msg from the source by sending a mailbox. Whether or not it
5468          * is received successfully, the destination needs to ack the source by
5469          * clearing the interrupt.
5470          */
5470         ret = qm_get_mb_cmd(qm, &msg, fun_num);
5471         qm_clear_cmd_interrupt(qm, BIT(fun_num));
5472         if (ret) {
5473                 dev_err(dev, "failed to get msg from source!\n");
5474                 return;
5475         }
5476
5477         cmd = msg & QM_MB_CMD_DATA_MASK;
5478         switch (cmd) {
5479         case QM_PF_FLR_PREPARE:
5480                 qm_pf_reset_vf_process(qm, QM_FLR);
5481                 break;
5482         case QM_PF_SRST_PREPARE:
5483                 qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
5484                 break;
5485         case QM_VF_GET_QOS:
5486                 qm_vf_get_qos(qm, fun_num);
5487                 break;
5488         case QM_PF_SET_QOS:
5489                 qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
5490                 break;
5491         default:
5492                 dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num);
5493                 break;
5494         }
5495 }
5496
5497 static void qm_cmd_process(struct work_struct *cmd_process)
5498 {
5499         struct hisi_qm *qm = container_of(cmd_process,
5500                                         struct hisi_qm, cmd_process);
5501         u32 vfs_num = qm->vfs_num;
5502         u64 val;
5503         u32 i;
5504
5505         if (qm->fun_type == QM_HW_PF) {
5506                 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
5507                 if (!val)
5508                         return;
5509
5510                 for (i = 1; i <= vfs_num; i++) {
5511                         if (val & BIT(i))
5512                                 qm_handle_cmd_msg(qm, i);
5513                 }
5514
5515                 return;
5516         }
5517
5518         qm_handle_cmd_msg(qm, 0);
5519 }
5520
5521 /**
5522  * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list.
5523  * @qm: The qm to be added.
5524  * @qm_list: The qm list.
5525  *
5526  * This function adds the qm to the qm list, and registers the algorithms
5527  * to crypto if the qm list was empty before the addition.
5528  */
5529 int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
5530 {
5531         struct device *dev = &qm->pdev->dev;
5532         int flag = 0;
5533         int ret = 0;
5534
5535         mutex_lock(&qm_list->lock);
5536         if (list_empty(&qm_list->list))
5537                 flag = 1;
5538         list_add_tail(&qm->list, &qm_list->list);
5539         mutex_unlock(&qm_list->lock);
5540
5541         if (qm->ver <= QM_HW_V2 && qm->use_sva) {
5542                 dev_info(dev, "HW V2 not both use uacce sva mode and hardware crypto algs.\n");
5543                 return 0;
5544         }
5545
5546         if (flag) {
5547                 ret = qm_list->register_to_crypto(qm);
5548                 if (ret) {
5549                         mutex_lock(&qm_list->lock);
5550                         list_del(&qm->list);
5551                         mutex_unlock(&qm_list->lock);
5552                 }
5553         }
5554
5555         return ret;
5556 }
5557 EXPORT_SYMBOL_GPL(hisi_qm_alg_register);
5558
5559 /**
5560  * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from
5561  * qm list.
5562  * @qm: The qm to be deleted.
5563  * @qm_list: The qm list.
5564  *
5565  * This function deletes the qm from the qm list, and unregisters the
5566  * algorithms from crypto when the qm list becomes empty.
5567  */
5568 void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
5569 {
5570         mutex_lock(&qm_list->lock);
5571         list_del(&qm->list);
5572         mutex_unlock(&qm_list->lock);
5573
5574         if (qm->ver <= QM_HW_V2 && qm->use_sva)
5575                 return;
5576
5577         if (list_empty(&qm_list->list))
5578                 qm_list->unregister_from_crypto(qm);
5579 }
5580 EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
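
/*
 * Example (illustrative sketch): a driver's probe and remove keep the two
 * calls above balanced around a driver-global hisi_qm_list ("xxx_devices"
 * and the error label are placeholders):
 *
 *	ret = hisi_qm_alg_register(qm, &xxx_devices);
 *	if (ret)
 *		goto err_qm_stop;
 *	...
 *	hisi_qm_alg_unregister(qm, &xxx_devices);
 */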
5581
5582 static int qm_get_qp_num(struct hisi_qm *qm)
5583 {
5584         if (qm->ver == QM_HW_V1)
5585                 qm->ctrl_qp_num = QM_QNUM_V1;
5586         else if (qm->ver == QM_HW_V2)
5587                 qm->ctrl_qp_num = QM_QNUM_V2;
5588         else
5589                 qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) &
5590                                         QM_QP_NUN_MASK;
5591
5592         if (qm->use_db_isolation)
5593                 qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >>
5594                                   QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK;
5595         else
5596                 qm->max_qp_num = qm->ctrl_qp_num;
5597
5598         /* check if qp number is valid */
5599         if (qm->qp_num > qm->max_qp_num) {
5600                 dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n",
5601                         qm->qp_num, qm->max_qp_num);
5602                 return -EINVAL;
5603         }
5604
5605         return 0;
5606 }
5607
5608 static int qm_get_pci_res(struct hisi_qm *qm)
5609 {
5610         struct pci_dev *pdev = qm->pdev;
5611         struct device *dev = &pdev->dev;
5612         int ret;
5613
5614         ret = pci_request_mem_regions(pdev, qm->dev_name);
5615         if (ret < 0) {
5616                 dev_err(dev, "Failed to request mem regions!\n");
5617                 return ret;
5618         }
5619
5620         qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
5621         qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2));
5622         if (!qm->io_base) {
5623                 ret = -EIO;
5624                 goto err_request_mem_regions;
5625         }
5626
5627         if (qm->ver > QM_HW_V2) {
5628                 if (qm->fun_type == QM_HW_PF)
5629                         qm->use_db_isolation = readl(qm->io_base +
5630                                                      QM_QUE_ISO_EN) & BIT(0);
5631                 else
5632                         qm->use_db_isolation = readl(qm->io_base +
5633                                                      QM_QUE_ISO_CFG_V) & BIT(0);
5634         }
5635
5636         if (qm->use_db_isolation) {
5637                 qm->db_interval = QM_QP_DB_INTERVAL;
5638                 qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
5639                 qm->db_io_base = ioremap(qm->db_phys_base,
5640                                          pci_resource_len(pdev, PCI_BAR_4));
5641                 if (!qm->db_io_base) {
5642                         ret = -EIO;
5643                         goto err_ioremap;
5644                 }
5645         } else {
5646                 qm->db_phys_base = qm->phys_base;
5647                 qm->db_io_base = qm->io_base;
5648                 qm->db_interval = 0;
5649         }
5650
5651         if (qm->fun_type == QM_HW_PF) {
5652                 ret = qm_get_qp_num(qm);
5653                 if (ret)
5654                         goto err_db_ioremap;
5655         }
5656
5657         return 0;
5658
5659 err_db_ioremap:
5660         if (qm->use_db_isolation)
5661                 iounmap(qm->db_io_base);
5662 err_ioremap:
5663         iounmap(qm->io_base);
5664 err_request_mem_regions:
5665         pci_release_mem_regions(pdev);
5666         return ret;
5667 }
5668
5669 static int hisi_qm_pci_init(struct hisi_qm *qm)
5670 {
5671         struct pci_dev *pdev = qm->pdev;
5672         struct device *dev = &pdev->dev;
5673         unsigned int num_vec;
5674         int ret;
5675
5676         ret = pci_enable_device_mem(pdev);
5677         if (ret < 0) {
5678                 dev_err(dev, "Failed to enable device mem!\n");
5679                 return ret;
5680         }
5681
5682         ret = qm_get_pci_res(qm);
5683         if (ret)
5684                 goto err_disable_pcidev;
5685
5686         ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
5687         if (ret < 0)
5688                 goto err_get_pci_res;
5689         pci_set_master(pdev);
5690
5691         if (!qm->ops->get_irq_num) {
5692                 ret = -EOPNOTSUPP;
5693                 goto err_get_pci_res;
5694         }
5695         num_vec = qm->ops->get_irq_num(qm);
5696         ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
5697         if (ret < 0) {
5698                 dev_err(dev, "Failed to enable MSI vectors!\n");
5699                 goto err_get_pci_res;
5700         }
5701
5702         return 0;
5703
5704 err_get_pci_res:
5705         qm_put_pci_res(qm);
5706 err_disable_pcidev:
5707         pci_disable_device(pdev);
5708         return ret;
5709 }
5710
5711 static void hisi_qm_init_work(struct hisi_qm *qm)
5712 {
5713         INIT_WORK(&qm->work, qm_work_process);
5714         if (qm->fun_type == QM_HW_PF)
5715                 INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);
5716
5717         if (qm->ver > QM_HW_V2)
5718                 INIT_WORK(&qm->cmd_process, qm_cmd_process);
5719 }
5720
5721 static int hisi_qp_alloc_memory(struct hisi_qm *qm)
5722 {
5723         struct device *dev = &qm->pdev->dev;
5724         size_t qp_dma_size;
5725         int i, ret;
5726
5727         qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
5728         if (!qm->qp_array)
5729                 return -ENOMEM;
5730
5731         /* one more page for device or qp statuses */
5732         qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
5733                       sizeof(struct qm_cqe) * QM_Q_DEPTH;
5734         qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
5735         for (i = 0; i < qm->qp_num; i++) {
5736                 ret = hisi_qp_memory_init(qm, qp_dma_size, i);
5737                 if (ret)
5738                         goto err_init_qp_mem;
5739
5740                 dev_dbg(dev, "allocate qp dma buf size=%zx)\n", qp_dma_size);
5741         }
5742
5743         return 0;
5744 err_init_qp_mem:
5745         hisi_qp_memory_uninit(qm, i);
5746
5747         return ret;
5748 }
5749
5750 static int hisi_qm_memory_init(struct hisi_qm *qm)
5751 {
5752         struct device *dev = &qm->pdev->dev;
5753         int ret, total_vfs;
5754         size_t off = 0;
5755
5756         total_vfs = pci_sriov_get_totalvfs(qm->pdev);
5757         qm->factor = kcalloc(total_vfs + 1, sizeof(struct qm_shaper_factor), GFP_KERNEL);
5758         if (!qm->factor)
5759                 return -ENOMEM;
5760
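/*
 * QM_INIT_BUF carves one QMC_ALIGN()-aligned region per context type
 * out of the single coherent qdma allocation made below, recording both
 * the CPU address ((qm)->type) and the DMA address ((qm)->type##_dma).
 */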
5761 #define QM_INIT_BUF(qm, type, num) do { \
5762         (qm)->type = ((qm)->qdma.va + (off)); \
5763         (qm)->type##_dma = (qm)->qdma.dma + (off); \
5764         off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
5765 } while (0)
5766
5767         idr_init(&qm->qp_idr);
5768         qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
5769                         QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
5770                         QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
5771                         QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
5772         qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
5773                                          GFP_ATOMIC);
5774         dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size);
5775         if (!qm->qdma.va) {
5776                 ret = -ENOMEM;
5777                 goto err_alloc_qdma;
5778         }
5779
5780         QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
5781         QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
5782         QM_INIT_BUF(qm, sqc, qm->qp_num);
5783         QM_INIT_BUF(qm, cqc, qm->qp_num);
5784
5785         ret = hisi_qp_alloc_memory(qm);
5786         if (ret)
5787                 goto err_alloc_qp_array;
5788
5789         return 0;
5790
5791 err_alloc_qp_array:
5792         dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
5793 err_alloc_qdma:
5794         kfree(qm->factor);
5795
5796         return ret;
5797 }
5798
5799 /**
5800  * hisi_qm_init() - Initialize the qm's configuration.
5801  * @qm: The qm needing init.
5802  *
5803  * This function initializes the qm; hisi_qm_start() can then put the qm to work.
5804  */
5805 int hisi_qm_init(struct hisi_qm *qm)
5806 {
5807         struct pci_dev *pdev = qm->pdev;
5808         struct device *dev = &pdev->dev;
5809         int ret;
5810
5811         hisi_qm_pre_init(qm);
5812
5813         ret = hisi_qm_pci_init(qm);
5814         if (ret)
5815                 return ret;
5816
5817         ret = qm_irq_register(qm);
5818         if (ret)
5819                 goto err_pci_init;
5820
5821         if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
5822                 /* v2 and later hardware supports getting the vft by mailbox */
5823                 ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
5824                 if (ret)
5825                         goto err_irq_register;
5826         }
5827
5828         if (qm->mode == UACCE_MODE_SVA) {
5829                 ret = qm_alloc_uacce(qm);
5830                 if (ret < 0)
5831                         dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
5832         }
5833
5834         ret = hisi_qm_memory_init(qm);
5835         if (ret)
5836                 goto err_alloc_uacce;
5837
5838         hisi_qm_init_work(qm);
5839         qm_cmd_init(qm);
5840         atomic_set(&qm->status.flags, QM_INIT);
5841
5842         return 0;
5843
5844 err_alloc_uacce:
5845         if (qm->use_sva) {
5846                 uacce_remove(qm->uacce);
5847                 qm->uacce = NULL;
5848         }
5849 err_irq_register:
5850         qm_irq_unregister(qm);
5851 err_pci_init:
5852         hisi_qm_pci_uninit(qm);
5853         return ret;
5854 }
5855 EXPORT_SYMBOL_GPL(hisi_qm_init);
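
/*
 * Example (illustrative sketch): a driver fills in the qm before calling
 * hisi_qm_init(); the values below are placeholders, not requirements.
 *
 *	qm->pdev = pdev;
 *	qm->ver = pdev->revision;
 *	qm->fun_type = QM_HW_PF;
 *	qm->dev_name = "hisi_xxx";
 *	qm->sqe_size = XXX_SQE_SIZE;
 *	ret = hisi_qm_init(qm);
 *	if (ret)
 *		return ret;
 */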
5856
5857 /**
5858  * hisi_qm_get_dfx_access() - Try to get dfx access.
5859  * @qm: pointer to accelerator device.
5860  *
5861  * Try to get dfx access; on success the caller may read debug information.
5862  *
5863  * If the device is suspended, return failure; otherwise
5864  * bump up the runtime PM usage counter.
5865  */
5866 int hisi_qm_get_dfx_access(struct hisi_qm *qm)
5867 {
5868         struct device *dev = &qm->pdev->dev;
5869
5870         if (pm_runtime_suspended(dev)) {
5871                 dev_info(dev, "can not read/write - device in suspended.\n");
5872                 return -EAGAIN;
5873         }
5874
5875         return qm_pm_get_sync(qm);
5876 }
5877 EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access);
5878
5879 /**
5880  * hisi_qm_put_dfx_access() - Put dfx access.
5881  * @qm: pointer to accelerator device.
5882  *
5883  * Put dfx access, drop runtime PM usage counter.
5884  */
5885 void hisi_qm_put_dfx_access(struct hisi_qm *qm)
5886 {
5887         qm_pm_put_sync(qm);
5888 }
5889 EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access);
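
/*
 * Example (illustrative sketch): debugfs readers bracket register access
 * with the pair above so the device cannot runtime-suspend mid-read
 * ("reg" is a placeholder offset):
 *
 *	ret = hisi_qm_get_dfx_access(qm);
 *	if (ret)
 *		return ret;
 *	val = readl(qm->io_base + reg);
 *	hisi_qm_put_dfx_access(qm);
 */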
5890
5891 /**
5892  * hisi_qm_pm_init() - Initialize qm runtime PM.
5893  * @qm: pointer to accelerator device.
5894  *
5895  * Function that initializes qm runtime PM.
5896  */
5897 void hisi_qm_pm_init(struct hisi_qm *qm)
5898 {
5899         struct device *dev = &qm->pdev->dev;
5900
5901         if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
5902                 return;
5903
5904         pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
5905         pm_runtime_use_autosuspend(dev);
5906         pm_runtime_put_noidle(dev);
5907 }
5908 EXPORT_SYMBOL_GPL(hisi_qm_pm_init);
5909
5910 /**
5911  * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
5912  * @qm: pointer to accelerator device.
5913  *
5914  * Function that uninitializes qm runtime PM.
5915  */
5916 void hisi_qm_pm_uninit(struct hisi_qm *qm)
5917 {
5918         struct device *dev = &qm->pdev->dev;
5919
5920         if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
5921                 return;
5922
5923         pm_runtime_get_noresume(dev);
5924         pm_runtime_dont_use_autosuspend(dev);
5925 }
5926 EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit);
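
/*
 * Example (illustrative sketch): hisi_qm_pm_init() is usually called at
 * the end of a successful probe, and hisi_qm_pm_uninit() at the start of
 * remove, so the usage-counter adjustments above stay balanced:
 *
 *	probe:	hisi_qm_pm_init(qm);
 *	remove:	hisi_qm_pm_uninit(qm);
 */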
5927
5928 static int qm_prepare_for_suspend(struct hisi_qm *qm)
5929 {
5930         struct pci_dev *pdev = qm->pdev;
5931         int ret;
5932         u32 val;
5933
5934         ret = qm->ops->set_msi(qm, false);
5935         if (ret) {
5936                 pci_err(pdev, "failed to disable MSI before suspending!\n");
5937                 return ret;
5938         }
5939
5940         /* shutdown OOO register */
5941         writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
5942                qm->io_base + ACC_MASTER_GLOBAL_CTRL);
5943
5944         ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
5945                                          val,
5946                                          (val == ACC_MASTER_TRANS_RETURN_RW),
5947                                          POLL_PERIOD, POLL_TIMEOUT);
5948         if (ret) {
5949                 pci_emerg(pdev, "Bus lock! Please reset system.\n");
5950                 return ret;
5951         }
5952
5953         ret = qm_set_pf_mse(qm, false);
5954         if (ret)
5955                 pci_err(pdev, "failed to disable MSE before suspending!\n");
5956
5957         return ret;
5958 }
5959
5960 static int qm_rebuild_for_resume(struct hisi_qm *qm)
5961 {
5962         struct pci_dev *pdev = qm->pdev;
5963         int ret;
5964
5965         ret = qm_set_pf_mse(qm, true);
5966         if (ret) {
5967                 pci_err(pdev, "failed to enable MSE after resuming!\n");
5968                 return ret;
5969         }
5970
5971         ret = qm->ops->set_msi(qm, true);
5972         if (ret) {
5973                 pci_err(pdev, "failed to enable MSI after resuming!\n");
5974                 return ret;
5975         }
5976
5977         ret = qm_dev_hw_init(qm);
5978         if (ret) {
5979                 pci_err(pdev, "failed to init device after resuming\n");
5980                 return ret;
5981         }
5982
5983         qm_cmd_init(qm);
5984         hisi_qm_dev_err_init(qm);
5985
5986         return 0;
5987 }
5988
5989 /**
5990  * hisi_qm_suspend() - Runtime suspend of given device.
5991  * @dev: device to suspend.
5992  *
5993  * Function that suspends the device.
5994  */
5995 int hisi_qm_suspend(struct device *dev)
5996 {
5997         struct pci_dev *pdev = to_pci_dev(dev);
5998         struct hisi_qm *qm = pci_get_drvdata(pdev);
5999         int ret;
6000
6001         pci_info(pdev, "entering suspended state\n");
6002
6003         ret = hisi_qm_stop(qm, QM_NORMAL);
6004         if (ret) {
6005                 pci_err(pdev, "failed to stop qm(%d)\n", ret);
6006                 return ret;
6007         }
6008
6009         ret = qm_prepare_for_suspend(qm);
6010         if (ret)
6011                 pci_err(pdev, "failed to prepare suspended(%d)\n", ret);
6012
6013         return ret;
6014 }
6015 EXPORT_SYMBOL_GPL(hisi_qm_suspend);
6016
6017 /**
6018  * hisi_qm_resume() - Runtime resume of given device.
6019  * @dev: device to resume.
6020  *
6021  * Function that resumes the device.
6022  */
6023 int hisi_qm_resume(struct device *dev)
6024 {
6025         struct pci_dev *pdev = to_pci_dev(dev);
6026         struct hisi_qm *qm = pci_get_drvdata(pdev);
6027         int ret;
6028
6029         pci_info(pdev, "resuming from suspend state\n");
6030
6031         ret = qm_rebuild_for_resume(qm);
6032         if (ret) {
6033                 pci_err(pdev, "failed to rebuild resume(%d)\n", ret);
6034                 return ret;
6035         }
6036
6037         ret = hisi_qm_start(qm);
6038         if (ret)
6039                 pci_err(pdev, "failed to start qm(%d)\n", ret);
6040
6041         return ret;
6042 }
6043 EXPORT_SYMBOL_GPL(hisi_qm_resume);
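
/*
 * Example (illustrative sketch): drivers expose the suspend/resume pair
 * above through runtime PM ops and point pci_driver.driver.pm at it
 * ("hisi_xxx" is a placeholder):
 *
 *	static const struct dev_pm_ops hisi_xxx_pm_ops = {
 *		SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
 *	};
 */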
6044
6045 MODULE_LICENSE("GPL v2");
6046 MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
6047 MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");