net/mlx5: Set command entry semaphore up once got index free
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
/*
 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"

enum {
        CMD_IF_REV = 5,
};

enum {
        CMD_MODE_POLLING,
        CMD_MODE_EVENTS
};

enum {
        MLX5_CMD_DELIVERY_STAT_OK                       = 0x0,
        MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR               = 0x1,
        MLX5_CMD_DELIVERY_STAT_TOK_ERR                  = 0x2,
        MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR          = 0x3,
        MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR        = 0x4,
        MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR         = 0x5,
        MLX5_CMD_DELIVERY_STAT_FW_ERR                   = 0x6,
        MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR            = 0x7,
        MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR           = 0x8,
        MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR      = 0x9,
        MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR            = 0x10,
};

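/*
 * Allocate a command work entry. GFP_ATOMIC is used for callback
 * (asynchronous) commands, which may be issued from atomic context.
 * The entry starts with a single reference; see cmd_ent_get()/cmd_ent_put().
 */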
static struct mlx5_cmd_work_ent *
cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
              struct mlx5_cmd_msg *out, void *uout, int uout_size,
              mlx5_cmd_cbk_t cbk, void *context, int page_queue)
{
        gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
        struct mlx5_cmd_work_ent *ent;

        ent = kzalloc(sizeof(*ent), alloc_flags);
        if (!ent)
                return ERR_PTR(-ENOMEM);

        ent->idx        = -EINVAL;
        ent->in         = in;
        ent->out        = out;
        ent->uout       = uout;
        ent->uout_size  = uout_size;
        ent->callback   = cbk;
        ent->context    = context;
        ent->cmd        = cmd;
        ent->page_queue = page_queue;
        refcount_set(&ent->refcnt, 1);

        return ent;
}

static void cmd_free_ent(struct mlx5_cmd_work_ent *ent)
{
        kfree(ent);
}

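/*
 * Tokens are never zero: the token is stamped into the command layout
 * and into every mailbox block, so completions and signatures can be
 * matched against the command that owns them.
 */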
static u8 alloc_token(struct mlx5_cmd *cmd)
{
        u8 token;

        spin_lock(&cmd->token_lock);
        cmd->token++;
        if (cmd->token == 0)
                cmd->token++;
        token = cmd->token;
        spin_unlock(&cmd->token_lock);

        return token;
}

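/*
 * Command slot indexes are tracked in cmd->bitmask: a set bit means the
 * slot is free. cmd_alloc_index() returns -ENOMEM when all max_reg_cmds
 * regular slots are taken; the extra slot at index max_reg_cmds is
 * reserved for the page queue (see cmd_work_handler()).
 */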
static int cmd_alloc_index(struct mlx5_cmd *cmd)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cmd->alloc_lock, flags);
        ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
        if (ret < cmd->max_reg_cmds)
                clear_bit(ret, &cmd->bitmask);
        spin_unlock_irqrestore(&cmd->alloc_lock, flags);

        return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
        unsigned long flags;

        spin_lock_irqsave(&cmd->alloc_lock, flags);
        set_bit(idx, &cmd->bitmask);
        spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

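/*
 * Entry lifetime is reference counted. The final put frees the slot
 * index first and only then ups the matching semaphore, so a semaphore
 * waiter is guaranteed to find a free index.
 */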
static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
{
        refcount_inc(&ent->refcnt);
}

static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
{
        if (!refcount_dec_and_test(&ent->refcnt))
                return;

        if (ent->idx >= 0) {
                struct mlx5_cmd *cmd = ent->cmd;

                cmd_free_index(cmd, ent->idx);
                up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
        }

        cmd_free_ent(ent);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
        return cmd->cmd_buf + (idx << cmd->log_stride);
}

static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
{
        int size = msg->len;
        int blen = size - min_t(int, sizeof(msg->first.data), size);

        return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE);
}

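/*
 * Command integrity uses 8-bit XOR signatures: each signature byte is
 * the complement of the XOR over its covered range, so XOR-ing the
 * range including the signature must yield 0xff on verification.
 */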
static u8 xor8_buf(void *buf, size_t offset, int len)
{
        u8 *ptr = buf;
        u8 sum = 0;
        int i;
        int end = len + offset;

        for (i = offset; i < end; i++)
                sum ^= ptr[i];

        return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
        size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
        int xor_len = sizeof(*block) - sizeof(block->data) - 1;

        if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
                return -EINVAL;

        if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
                return -EINVAL;

        return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
        int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
        size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);

        block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
        block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}

static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
        struct mlx5_cmd_mailbox *next = msg->next;
        int n = mlx5_calc_cmd_blocks(msg);
        int i = 0;

        for (i = 0; i < n && next; i++)  {
                calc_block_sig(next->buf);
                next = next->next;
        }
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
        ent->lay->sig = ~xor8_buf(ent->lay, 0,  sizeof(*ent->lay));
        if (csum) {
                calc_chain_sig(ent->in);
                calc_chain_sig(ent->out);
        }
}

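/*
 * In polling mode, spin until firmware clears the HW ownership bit in
 * the command layout, allowing up to the FW command timeout plus a one
 * second grace period before declaring -ETIMEDOUT.
 */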
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
        struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, cmd);
        u64 cmd_to_ms = mlx5_tout_ms(dev, CMD);
        unsigned long poll_end;
        u8 own;

        poll_end = jiffies + msecs_to_jiffies(cmd_to_ms + 1000);

        do {
                own = READ_ONCE(ent->lay->status_own);
                if (!(own & CMD_OWNER_HW)) {
                        ent->ret = 0;
                        return;
                }
                cond_resched();
        } while (time_before(jiffies, poll_end));

        ent->ret = -ETIMEDOUT;
}

static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
        struct mlx5_cmd_mailbox *next = ent->out->next;
        int n = mlx5_calc_cmd_blocks(ent->out);
        int err;
        u8 sig;
        int i = 0;

        sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
        if (sig != 0xff)
                return -EINVAL;

        for (i = 0; i < n && next; i++) {
                err = verify_block_sig(next->buf);
                if (err)
                        return err;

                next = next->next;
        }

        return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset, int idx)
{
        __be32 *p = buf;
        int i;

        for (i = 0; i < size; i += 16) {
                pr_debug("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, offset,
                         be32_to_cpu(p[0]), be32_to_cpu(p[1]),
                         be32_to_cpu(p[2]), be32_to_cpu(p[3]));
                p += 4;
                offset += 16;
        }
        if (!data_only)
                pr_debug("\n");
}

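/*
 * When the device is unreachable (internal error, PCI channel offline),
 * commands are completed by the driver instead of firmware. Teardown-style
 * commands report success so cleanup flows can make progress; everything
 * else fails with -EIO and a driver-generated syndrome.
 */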
static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
                                       u32 *synd, u8 *status)
{
        *synd = 0;
        *status = 0;

        switch (op) {
        case MLX5_CMD_OP_TEARDOWN_HCA:
        case MLX5_CMD_OP_DISABLE_HCA:
        case MLX5_CMD_OP_MANAGE_PAGES:
        case MLX5_CMD_OP_DESTROY_MKEY:
        case MLX5_CMD_OP_DESTROY_EQ:
        case MLX5_CMD_OP_DESTROY_CQ:
        case MLX5_CMD_OP_DESTROY_QP:
        case MLX5_CMD_OP_DESTROY_PSV:
        case MLX5_CMD_OP_DESTROY_SRQ:
        case MLX5_CMD_OP_DESTROY_XRC_SRQ:
        case MLX5_CMD_OP_DESTROY_XRQ:
        case MLX5_CMD_OP_DESTROY_DCT:
        case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
        case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
        case MLX5_CMD_OP_DEALLOC_PD:
        case MLX5_CMD_OP_DEALLOC_UAR:
        case MLX5_CMD_OP_DETACH_FROM_MCG:
        case MLX5_CMD_OP_DEALLOC_XRCD:
        case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
        case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
        case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
        case MLX5_CMD_OP_DESTROY_LAG:
        case MLX5_CMD_OP_DESTROY_VPORT_LAG:
        case MLX5_CMD_OP_DESTROY_TIR:
        case MLX5_CMD_OP_DESTROY_SQ:
        case MLX5_CMD_OP_DESTROY_RQ:
        case MLX5_CMD_OP_DESTROY_RMP:
        case MLX5_CMD_OP_DESTROY_TIS:
        case MLX5_CMD_OP_DESTROY_RQT:
        case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
        case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
        case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
        case MLX5_CMD_OP_2ERR_QP:
        case MLX5_CMD_OP_2RST_QP:
        case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
        case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
        case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
        case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT:
        case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
        case MLX5_CMD_OP_FPGA_DESTROY_QP:
        case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
        case MLX5_CMD_OP_DEALLOC_MEMIC:
        case MLX5_CMD_OP_PAGE_FAULT_RESUME:
        case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
        case MLX5_CMD_OP_DEALLOC_SF:
        case MLX5_CMD_OP_DESTROY_UCTX:
        case MLX5_CMD_OP_DESTROY_UMEM:
        case MLX5_CMD_OP_MODIFY_RQT:
                return MLX5_CMD_STAT_OK;

        case MLX5_CMD_OP_QUERY_HCA_CAP:
        case MLX5_CMD_OP_QUERY_ADAPTER:
        case MLX5_CMD_OP_INIT_HCA:
        case MLX5_CMD_OP_ENABLE_HCA:
        case MLX5_CMD_OP_QUERY_PAGES:
        case MLX5_CMD_OP_SET_HCA_CAP:
        case MLX5_CMD_OP_QUERY_ISSI:
        case MLX5_CMD_OP_SET_ISSI:
        case MLX5_CMD_OP_CREATE_MKEY:
        case MLX5_CMD_OP_QUERY_MKEY:
        case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
        case MLX5_CMD_OP_CREATE_EQ:
        case MLX5_CMD_OP_QUERY_EQ:
        case MLX5_CMD_OP_GEN_EQE:
        case MLX5_CMD_OP_CREATE_CQ:
        case MLX5_CMD_OP_QUERY_CQ:
        case MLX5_CMD_OP_MODIFY_CQ:
        case MLX5_CMD_OP_CREATE_QP:
        case MLX5_CMD_OP_RST2INIT_QP:
        case MLX5_CMD_OP_INIT2RTR_QP:
        case MLX5_CMD_OP_RTR2RTS_QP:
        case MLX5_CMD_OP_RTS2RTS_QP:
        case MLX5_CMD_OP_SQERR2RTS_QP:
        case MLX5_CMD_OP_QUERY_QP:
        case MLX5_CMD_OP_SQD_RTS_QP:
        case MLX5_CMD_OP_INIT2INIT_QP:
        case MLX5_CMD_OP_CREATE_PSV:
        case MLX5_CMD_OP_CREATE_SRQ:
        case MLX5_CMD_OP_QUERY_SRQ:
        case MLX5_CMD_OP_ARM_RQ:
        case MLX5_CMD_OP_CREATE_XRC_SRQ:
        case MLX5_CMD_OP_QUERY_XRC_SRQ:
        case MLX5_CMD_OP_ARM_XRC_SRQ:
        case MLX5_CMD_OP_CREATE_XRQ:
        case MLX5_CMD_OP_QUERY_XRQ:
        case MLX5_CMD_OP_ARM_XRQ:
        case MLX5_CMD_OP_CREATE_DCT:
        case MLX5_CMD_OP_DRAIN_DCT:
        case MLX5_CMD_OP_QUERY_DCT:
        case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
        case MLX5_CMD_OP_QUERY_VPORT_STATE:
        case MLX5_CMD_OP_MODIFY_VPORT_STATE:
        case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
        case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
        case MLX5_CMD_OP_SET_ROCE_ADDRESS:
        case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
        case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
        case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
        case MLX5_CMD_OP_QUERY_VNIC_ENV:
        case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
        case MLX5_CMD_OP_ALLOC_Q_COUNTER:
        case MLX5_CMD_OP_QUERY_Q_COUNTER:
        case MLX5_CMD_OP_SET_MONITOR_COUNTER:
        case MLX5_CMD_OP_ARM_MONITOR_COUNTER:
        case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
        case MLX5_CMD_OP_QUERY_RATE_LIMIT:
        case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
        case MLX5_CMD_OP_ALLOC_PD:
        case MLX5_CMD_OP_ALLOC_UAR:
        case MLX5_CMD_OP_CONFIG_INT_MODERATION:
        case MLX5_CMD_OP_ACCESS_REG:
        case MLX5_CMD_OP_ATTACH_TO_MCG:
        case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
        case MLX5_CMD_OP_MAD_IFC:
        case MLX5_CMD_OP_QUERY_MAD_DEMUX:
        case MLX5_CMD_OP_SET_MAD_DEMUX:
        case MLX5_CMD_OP_NOP:
        case MLX5_CMD_OP_ALLOC_XRCD:
        case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
        case MLX5_CMD_OP_QUERY_CONG_STATUS:
        case MLX5_CMD_OP_MODIFY_CONG_STATUS:
        case MLX5_CMD_OP_QUERY_CONG_PARAMS:
        case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
        case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
        case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
        case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
        case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
        case MLX5_CMD_OP_CREATE_LAG:
        case MLX5_CMD_OP_MODIFY_LAG:
        case MLX5_CMD_OP_QUERY_LAG:
        case MLX5_CMD_OP_CREATE_VPORT_LAG:
        case MLX5_CMD_OP_CREATE_TIR:
        case MLX5_CMD_OP_MODIFY_TIR:
        case MLX5_CMD_OP_QUERY_TIR:
        case MLX5_CMD_OP_CREATE_SQ:
        case MLX5_CMD_OP_MODIFY_SQ:
        case MLX5_CMD_OP_QUERY_SQ:
        case MLX5_CMD_OP_CREATE_RQ:
        case MLX5_CMD_OP_MODIFY_RQ:
        case MLX5_CMD_OP_QUERY_RQ:
        case MLX5_CMD_OP_CREATE_RMP:
        case MLX5_CMD_OP_MODIFY_RMP:
        case MLX5_CMD_OP_QUERY_RMP:
        case MLX5_CMD_OP_CREATE_TIS:
        case MLX5_CMD_OP_MODIFY_TIS:
        case MLX5_CMD_OP_QUERY_TIS:
        case MLX5_CMD_OP_CREATE_RQT:
        case MLX5_CMD_OP_QUERY_RQT:

        case MLX5_CMD_OP_CREATE_FLOW_TABLE:
        case MLX5_CMD_OP_QUERY_FLOW_TABLE:
        case MLX5_CMD_OP_CREATE_FLOW_GROUP:
        case MLX5_CMD_OP_QUERY_FLOW_GROUP:
        case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
        case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
        case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
        case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
        case MLX5_CMD_OP_FPGA_CREATE_QP:
        case MLX5_CMD_OP_FPGA_MODIFY_QP:
        case MLX5_CMD_OP_FPGA_QUERY_QP:
        case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
        case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
        case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
        case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
        case MLX5_CMD_OP_CREATE_UCTX:
        case MLX5_CMD_OP_CREATE_UMEM:
        case MLX5_CMD_OP_ALLOC_MEMIC:
        case MLX5_CMD_OP_MODIFY_XRQ:
        case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
        case MLX5_CMD_OP_QUERY_VHCA_STATE:
        case MLX5_CMD_OP_MODIFY_VHCA_STATE:
        case MLX5_CMD_OP_ALLOC_SF:
                *status = MLX5_DRIVER_STATUS_ABORTED;
                *synd = MLX5_DRIVER_SYND;
                return -EIO;
        default:
                mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
                return -EINVAL;
        }
}

const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

        switch (command) {
        MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
        MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
        MLX5_COMMAND_STR_CASE(INIT_HCA);
        MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
        MLX5_COMMAND_STR_CASE(ENABLE_HCA);
        MLX5_COMMAND_STR_CASE(DISABLE_HCA);
        MLX5_COMMAND_STR_CASE(QUERY_PAGES);
        MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
        MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
        MLX5_COMMAND_STR_CASE(QUERY_ISSI);
        MLX5_COMMAND_STR_CASE(SET_ISSI);
        MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION);
        MLX5_COMMAND_STR_CASE(CREATE_MKEY);
        MLX5_COMMAND_STR_CASE(QUERY_MKEY);
        MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
        MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
        MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
        MLX5_COMMAND_STR_CASE(CREATE_EQ);
        MLX5_COMMAND_STR_CASE(DESTROY_EQ);
        MLX5_COMMAND_STR_CASE(QUERY_EQ);
        MLX5_COMMAND_STR_CASE(GEN_EQE);
        MLX5_COMMAND_STR_CASE(CREATE_CQ);
        MLX5_COMMAND_STR_CASE(DESTROY_CQ);
        MLX5_COMMAND_STR_CASE(QUERY_CQ);
        MLX5_COMMAND_STR_CASE(MODIFY_CQ);
        MLX5_COMMAND_STR_CASE(CREATE_QP);
        MLX5_COMMAND_STR_CASE(DESTROY_QP);
        MLX5_COMMAND_STR_CASE(RST2INIT_QP);
        MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
        MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
        MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
        MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
        MLX5_COMMAND_STR_CASE(2ERR_QP);
        MLX5_COMMAND_STR_CASE(2RST_QP);
        MLX5_COMMAND_STR_CASE(QUERY_QP);
        MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
        MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
        MLX5_COMMAND_STR_CASE(CREATE_PSV);
        MLX5_COMMAND_STR_CASE(DESTROY_PSV);
        MLX5_COMMAND_STR_CASE(CREATE_SRQ);
        MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
        MLX5_COMMAND_STR_CASE(QUERY_SRQ);
        MLX5_COMMAND_STR_CASE(ARM_RQ);
        MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
        MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
        MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
        MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
        MLX5_COMMAND_STR_CASE(CREATE_DCT);
        MLX5_COMMAND_STR_CASE(DESTROY_DCT);
        MLX5_COMMAND_STR_CASE(DRAIN_DCT);
        MLX5_COMMAND_STR_CASE(QUERY_DCT);
        MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
        MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
        MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
        MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
        MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
        MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
        MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
        MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
        MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
        MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
        MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
        MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
        MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
        MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV);
        MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
        MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER);
        MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER);
        MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
        MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
        MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
        MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
        MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
        MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
        MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
        MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
        MLX5_COMMAND_STR_CASE(ALLOC_PD);
        MLX5_COMMAND_STR_CASE(DEALLOC_PD);
        MLX5_COMMAND_STR_CASE(ALLOC_UAR);
        MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
        MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
        MLX5_COMMAND_STR_CASE(ACCESS_REG);
        MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
        MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
        MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
        MLX5_COMMAND_STR_CASE(MAD_IFC);
        MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
        MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
        MLX5_COMMAND_STR_CASE(NOP);
        MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
        MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
        MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
        MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
        MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
        MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
        MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
        MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
        MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
        MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
        MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
        MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
        MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
        MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
        MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
        MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
        MLX5_COMMAND_STR_CASE(CREATE_LAG);
        MLX5_COMMAND_STR_CASE(MODIFY_LAG);
        MLX5_COMMAND_STR_CASE(QUERY_LAG);
        MLX5_COMMAND_STR_CASE(DESTROY_LAG);
        MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
        MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
        MLX5_COMMAND_STR_CASE(CREATE_TIR);
        MLX5_COMMAND_STR_CASE(MODIFY_TIR);
        MLX5_COMMAND_STR_CASE(DESTROY_TIR);
        MLX5_COMMAND_STR_CASE(QUERY_TIR);
        MLX5_COMMAND_STR_CASE(CREATE_SQ);
        MLX5_COMMAND_STR_CASE(MODIFY_SQ);
        MLX5_COMMAND_STR_CASE(DESTROY_SQ);
        MLX5_COMMAND_STR_CASE(QUERY_SQ);
        MLX5_COMMAND_STR_CASE(CREATE_RQ);
        MLX5_COMMAND_STR_CASE(MODIFY_RQ);
        MLX5_COMMAND_STR_CASE(DESTROY_RQ);
        MLX5_COMMAND_STR_CASE(QUERY_RQ);
        MLX5_COMMAND_STR_CASE(CREATE_RMP);
        MLX5_COMMAND_STR_CASE(MODIFY_RMP);
        MLX5_COMMAND_STR_CASE(DESTROY_RMP);
        MLX5_COMMAND_STR_CASE(QUERY_RMP);
        MLX5_COMMAND_STR_CASE(CREATE_TIS);
        MLX5_COMMAND_STR_CASE(MODIFY_TIS);
        MLX5_COMMAND_STR_CASE(DESTROY_TIS);
        MLX5_COMMAND_STR_CASE(QUERY_TIS);
        MLX5_COMMAND_STR_CASE(CREATE_RQT);
        MLX5_COMMAND_STR_CASE(MODIFY_RQT);
        MLX5_COMMAND_STR_CASE(DESTROY_RQT);
        MLX5_COMMAND_STR_CASE(QUERY_RQT);
        MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
        MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
        MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
        MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
        MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
        MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
        MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
        MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
        MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
        MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
        MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
        MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
        MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
        MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
        MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT);
        MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT);
        MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
        MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
        MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
        MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP);
        MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
        MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
        MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
        MLX5_COMMAND_STR_CASE(CREATE_XRQ);
        MLX5_COMMAND_STR_CASE(DESTROY_XRQ);
        MLX5_COMMAND_STR_CASE(QUERY_XRQ);
        MLX5_COMMAND_STR_CASE(ARM_XRQ);
        MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT);
        MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT);
        MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
        MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
        MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
        MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
        MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
        MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS);
        MLX5_COMMAND_STR_CASE(CREATE_UCTX);
        MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
        MLX5_COMMAND_STR_CASE(CREATE_UMEM);
        MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
        MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
        MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
        MLX5_COMMAND_STR_CASE(QUERY_VHCA_STATE);
        MLX5_COMMAND_STR_CASE(MODIFY_VHCA_STATE);
        MLX5_COMMAND_STR_CASE(ALLOC_SF);
        MLX5_COMMAND_STR_CASE(DEALLOC_SF);
        default: return "unknown command opcode";
        }
}

static const char *cmd_status_str(u8 status)
{
        switch (status) {
        case MLX5_CMD_STAT_OK:
                return "OK";
        case MLX5_CMD_STAT_INT_ERR:
                return "internal error";
        case MLX5_CMD_STAT_BAD_OP_ERR:
                return "bad operation";
        case MLX5_CMD_STAT_BAD_PARAM_ERR:
                return "bad parameter";
        case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
                return "bad system state";
        case MLX5_CMD_STAT_BAD_RES_ERR:
                return "bad resource";
        case MLX5_CMD_STAT_RES_BUSY:
                return "resource busy";
        case MLX5_CMD_STAT_LIM_ERR:
                return "limits exceeded";
        case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
                return "bad resource state";
        case MLX5_CMD_STAT_IX_ERR:
                return "bad index";
        case MLX5_CMD_STAT_NO_RES_ERR:
                return "no resources";
        case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
                return "bad input length";
        case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
                return "bad output length";
        case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
                return "bad QP state";
        case MLX5_CMD_STAT_BAD_PKT_ERR:
                return "bad packet (discarded)";
        case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
                return "bad size too many outstanding CQEs";
        default:
                return "unknown status";
        }
}

static int cmd_status_to_err(u8 status)
{
        switch (status) {
        case MLX5_CMD_STAT_OK:                          return 0;
        case MLX5_CMD_STAT_INT_ERR:                     return -EIO;
        case MLX5_CMD_STAT_BAD_OP_ERR:                  return -EINVAL;
        case MLX5_CMD_STAT_BAD_PARAM_ERR:               return -EINVAL;
        case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:           return -EIO;
        case MLX5_CMD_STAT_BAD_RES_ERR:                 return -EINVAL;
        case MLX5_CMD_STAT_RES_BUSY:                    return -EBUSY;
        case MLX5_CMD_STAT_LIM_ERR:                     return -ENOMEM;
        case MLX5_CMD_STAT_BAD_RES_STATE_ERR:           return -EINVAL;
        case MLX5_CMD_STAT_IX_ERR:                      return -EINVAL;
        case MLX5_CMD_STAT_NO_RES_ERR:                  return -EAGAIN;
        case MLX5_CMD_STAT_BAD_INP_LEN_ERR:             return -EIO;
        case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:            return -EIO;
        case MLX5_CMD_STAT_BAD_QP_STATE_ERR:            return -EINVAL;
        case MLX5_CMD_STAT_BAD_PKT_ERR:                 return -EINVAL;
        case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:      return -EINVAL;
        default:                                        return -EIO;
        }
}

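/*
 * Minimal views of the common mailbox header: every command output
 * starts with status/syndrome and every input with opcode/uid/op_mod,
 * which is what allows the generic checking in mlx5_cmd_check().
 */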
struct mlx5_ifc_mbox_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];

        u8         syndrome[0x20];

        u8         reserved_at_40[0x40];
};

struct mlx5_ifc_mbox_in_bits {
        u8         opcode[0x10];
        u8         uid[0x10];

        u8         reserved_at_20[0x10];
        u8         op_mod[0x10];

        u8         reserved_at_40[0x40];
};

void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
{
        *status = MLX5_GET(mbox_out, out, status);
        *syndrome = MLX5_GET(mbox_out, out, syndrome);
}

static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
{
        u32 syndrome;
        u8  status;
        u16 opcode;
        u16 op_mod;
        u16 uid;

        mlx5_cmd_mbox_status(out, &status, &syndrome);
        if (!status)
                return 0;

        opcode = MLX5_GET(mbox_in, in, opcode);
        op_mod = MLX5_GET(mbox_in, in, op_mod);
        uid    = MLX5_GET(mbox_in, in, uid);

        if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
                mlx5_core_err_rl(dev,
                        "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
                        mlx5_command_str(opcode), opcode, op_mod,
                        cmd_status_str(status), status, syndrome);
        else
                mlx5_core_dbg(dev,
                        "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
                        mlx5_command_str(opcode), opcode, op_mod,
                        cmd_status_str(status), status, syndrome);

        return cmd_status_to_err(status);
}

static void dump_command(struct mlx5_core_dev *dev,
                         struct mlx5_cmd_work_ent *ent, int input)
{
        struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
        u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
        struct mlx5_cmd_mailbox *next = msg->next;
        int n = mlx5_calc_cmd_blocks(msg);
        int data_only;
        u32 offset = 0;
        int dump_len;
        int i;

        mlx5_core_dbg(dev, "cmd[%d]: start dump\n", ent->idx);
        data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

        if (data_only)
                mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
                                   "cmd[%d]: dump command data %s(0x%x) %s\n",
                                   ent->idx, mlx5_command_str(op), op,
                                   input ? "INPUT" : "OUTPUT");
        else
                mlx5_core_dbg(dev, "cmd[%d]: dump command %s(0x%x) %s\n",
                              ent->idx, mlx5_command_str(op), op,
                              input ? "INPUT" : "OUTPUT");

        if (data_only) {
                if (input) {
                        dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset, ent->idx);
                        offset += sizeof(ent->lay->in);
                } else {
                        dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset, ent->idx);
                        offset += sizeof(ent->lay->out);
                }
        } else {
                dump_buf(ent->lay, sizeof(*ent->lay), 0, offset, ent->idx);
                offset += sizeof(*ent->lay);
        }

        for (i = 0; i < n && next; i++)  {
                if (data_only) {
                        dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
                        dump_buf(next->buf, dump_len, 1, offset, ent->idx);
                        offset += MLX5_CMD_DATA_BLOCK_SIZE;
                } else {
                        mlx5_core_dbg(dev, "cmd[%d]: command block:\n", ent->idx);
                        dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset,
                                 ent->idx);
                        offset += sizeof(struct mlx5_cmd_prot_block);
                }
                next = next->next;
        }

        if (data_only)
                pr_debug("\n");

        mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx);
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
        return MLX5_GET(mbox_in, in->first.data, opcode);
}

static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);

static void cb_timeout_handler(struct work_struct *work)
{
        struct delayed_work *dwork = container_of(work, struct delayed_work,
                                                  work);
        struct mlx5_cmd_work_ent *ent = container_of(dwork,
                                                     struct mlx5_cmd_work_ent,
                                                     cb_timeout_work);
        struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
                                                 cmd);

        mlx5_cmd_eq_recover(dev);

        /* Maybe it was already handled by EQ recovery? */
        if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) {
                mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx,
                               mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
                goto out; /* phew, already handled */
        }

        ent->ret = -ETIMEDOUT;
        mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
                       ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
        mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);

out:
        cmd_ent_put(ent); /* for the cmd_ent_get() taken when scheduling the delayed work */
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
                              struct mlx5_cmd_msg *msg);

static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
{
        if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL)
                return true;

        return cmd->allowed_opcode == opcode;
}

static int cmd_alloc_index_retry(struct mlx5_cmd *cmd)
{
        unsigned long alloc_end = jiffies + msecs_to_jiffies(1000);
        int idx;

retry:
        idx = cmd_alloc_index(cmd);
        if (idx < 0 && time_before(jiffies, alloc_end)) {
                /* Index allocation can fail on heavy load of commands. This is a temporary
                 * situation as the current command already holds the semaphore, meaning that
                 * another command completion is being handled and it is expected to release
                 * the entry index soon.
                 */
                cpu_relax();
                goto retry;
        }
        return idx;
}

bool mlx5_cmd_is_down(struct mlx5_core_dev *dev)
{
        return pci_channel_offline(dev->pdev) ||
               dev->cmd.state != MLX5_CMDIF_STATE_UP ||
               dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR;
}

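/*
 * Workqueue handler that actually issues a command: take the (pages or
 * regular) semaphore, claim a slot index, build the command layout and
 * ring the doorbell. In polling mode the completion is handled here too.
 */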
static void cmd_work_handler(struct work_struct *work)
{
        struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
        struct mlx5_cmd *cmd = ent->cmd;
        bool poll_cmd = ent->polling;
        struct mlx5_cmd_layout *lay;
        struct mlx5_core_dev *dev;
        unsigned long cb_timeout;
        struct semaphore *sem;
        unsigned long flags;
        int alloc_ret;
        int cmd_mode;

        dev = container_of(cmd, struct mlx5_core_dev, cmd);
        cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));

        complete(&ent->handling);
        sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
        down(sem);
        if (!ent->page_queue) {
                alloc_ret = cmd_alloc_index_retry(cmd);
                if (alloc_ret < 0) {
                        mlx5_core_err_rl(dev, "failed to allocate command entry\n");
                        if (ent->callback) {
                                ent->callback(-EAGAIN, ent->context);
                                mlx5_free_cmd_msg(dev, ent->out);
                                free_msg(dev, ent->in);
                                cmd_ent_put(ent);
                        } else {
                                ent->ret = -EAGAIN;
                                complete(&ent->done);
                        }
                        up(sem);
                        return;
                }
                ent->idx = alloc_ret;
        } else {
                ent->idx = cmd->max_reg_cmds;
                spin_lock_irqsave(&cmd->alloc_lock, flags);
                clear_bit(ent->idx, &cmd->bitmask);
                spin_unlock_irqrestore(&cmd->alloc_lock, flags);
        }

        cmd->ent_arr[ent->idx] = ent;
        lay = get_inst(cmd, ent->idx);
        ent->lay = lay;
        memset(lay, 0, sizeof(*lay));
        memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
        ent->op = be32_to_cpu(lay->in[0]) >> 16;
        if (ent->in->next)
                lay->in_ptr = cpu_to_be64(ent->in->next->dma);
        lay->inlen = cpu_to_be32(ent->in->len);
        if (ent->out->next)
                lay->out_ptr = cpu_to_be64(ent->out->next->dma);
        lay->outlen = cpu_to_be32(ent->out->len);
        lay->type = MLX5_PCI_CMD_XPORT;
        lay->token = ent->token;
        lay->status_own = CMD_OWNER_HW;
        set_signature(ent, !cmd->checksum_disabled);
        dump_command(dev, ent, 1);
        ent->ts1 = ktime_get_ns();
        cmd_mode = cmd->mode;

        if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout))
                cmd_ent_get(ent);
        set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);

        /* Skip sending command to fw if internal error */
        if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
                u8 status = 0;
                u32 drv_synd;

                ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
                MLX5_SET(mbox_out, ent->out, status, status);
                MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);

                mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
                return;
        }

        cmd_ent_get(ent); /* for the _real_ FW event on completion */
        /* ring doorbell after the descriptor is valid */
        mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
        wmb();
        iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
        /* if not in polling don't use ent after this point */
        if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
                poll_timeout(ent);
                /* make sure we read the descriptor after ownership is SW */
                rmb();
                mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
        }
}

static const char *deliv_status_to_str(u8 status)
{
        switch (status) {
        case MLX5_CMD_DELIVERY_STAT_OK:
                return "no errors";
        case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
                return "signature error";
        case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
                return "token error";
        case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
                return "bad block number";
        case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
                return "output pointer not aligned to block size";
        case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
                return "input pointer not aligned to block size";
        case MLX5_CMD_DELIVERY_STAT_FW_ERR:
                return "firmware internal error";
        case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
                return "command input length error";
        case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
                return "command output length error";
        case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
                return "reserved fields not cleared";
        case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
                return "bad command descriptor type";
        default:
                return "unknown status code";
        }
}

enum {
        MLX5_CMD_TIMEOUT_RECOVER_MSEC   = 5 * 1000,
};

static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
                                          struct mlx5_cmd_work_ent *ent)
{
        unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_RECOVER_MSEC);

        mlx5_cmd_eq_recover(dev);

        /* Re-wait on the ent->done after executing the recovery flow. If the
         * recovery flow (or any other recovery flow running simultaneously)
         * has recovered an EQE, it should cause the entry to be completed by
         * the command interface.
         */
        if (wait_for_completion_timeout(&ent->done, timeout)) {
                mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
                               mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
                return;
        }

        mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
                       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));

        ent->ret = -ETIMEDOUT;
        mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
}

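/*
 * Block until the command completes. Event-mode commands time out after
 * the FW command timeout and then go through the EQ recovery path above
 * before being forcibly completed.
 */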
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
        unsigned long timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
        struct mlx5_cmd *cmd = &dev->cmd;
        int err;

        if (!wait_for_completion_timeout(&ent->handling, timeout) &&
            cancel_work_sync(&ent->work)) {
                ent->ret = -ECANCELED;
                goto out_err;
        }
        if (cmd->mode == CMD_MODE_POLLING || ent->polling)
                wait_for_completion(&ent->done);
        else if (!wait_for_completion_timeout(&ent->done, timeout))
                wait_func_handle_exec_timeout(dev, ent);

out_err:
        err = ent->ret;

        if (err == -ETIMEDOUT) {
                mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
                               mlx5_command_str(msg_to_opcode(ent->in)),
                               msg_to_opcode(ent->in));
        } else if (err == -ECANCELED) {
                mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
                               mlx5_command_str(msg_to_opcode(ent->in)),
                               msg_to_opcode(ent->in));
        }
        mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
                      err, deliv_status_to_str(ent->status), ent->status);

        return err;
}

/*  Notes:
 *    1. Callback functions may not sleep
 *    2. Page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
                           struct mlx5_cmd_msg *out, void *uout, int uout_size,
                           mlx5_cmd_cbk_t callback,
                           void *context, int page_queue, u8 *status,
                           u8 token, bool force_polling)
{
        struct mlx5_cmd *cmd = &dev->cmd;
        struct mlx5_cmd_work_ent *ent;
        struct mlx5_cmd_stats *stats;
        int err = 0;
        s64 ds;
        u16 op;

        if (callback && page_queue)
                return -EINVAL;

        ent = cmd_alloc_ent(cmd, in, out, uout, uout_size,
                            callback, context, page_queue);
        if (IS_ERR(ent))
                return PTR_ERR(ent);

        /* put for this ent is when consumed, depending on the use case
         * 1) (!callback) blocking flow: by caller after wait_func completes
         * 2) (callback) flow: by mlx5_cmd_comp_handler() when ent is handled
         */

        ent->token = token;
        ent->polling = force_polling;

        init_completion(&ent->handling);
        if (!callback)
                init_completion(&ent->done);

        INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
        INIT_WORK(&ent->work, cmd_work_handler);
        if (page_queue) {
                cmd_work_handler(&ent->work);
        } else if (!queue_work(cmd->wq, &ent->work)) {
                mlx5_core_warn(dev, "failed to queue work\n");
                err = -ENOMEM;
                goto out_free;
        }

        if (callback)
                goto out; /* mlx5_cmd_comp_handler() will put(ent) */

        err = wait_func(dev, ent);
        if (err == -ETIMEDOUT || err == -ECANCELED)
                goto out_free;

        ds = ent->ts2 - ent->ts1;
        op = MLX5_GET(mbox_in, in->first.data, opcode);
        if (op < MLX5_CMD_OP_MAX) {
                stats = &cmd->stats[op];
                spin_lock_irq(&stats->lock);
                stats->sum += ds;
                ++stats->n;
                spin_unlock_irq(&stats->lock);
        }
        mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
                           "fw exec time for %s is %lld nsec\n",
                           mlx5_command_str(op), ds);
        *status = ent->status;

out_free:
        cmd_ent_put(ent);
out:
        return err;
}

static ssize_t dbg_write(struct file *filp, const char __user *buf,
                         size_t count, loff_t *pos)
{
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
        char lbuf[3];
        int err;

        if (!dbg->in_msg || !dbg->out_msg)
                return -ENOMEM;

        if (count < sizeof(lbuf) - 1)
                return -EINVAL;

        if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
                return -EFAULT;

        lbuf[sizeof(lbuf) - 1] = 0;

        if (strcmp(lbuf, "go"))
                return -EINVAL;

        err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

        return err ? err : count;
}

static const struct file_operations fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = dbg_write,
};

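/*
 * Command payloads are split across an inline chunk (first.data) and a
 * chain of DMA mailbox blocks; these helpers scatter a linear buffer
 * into that chain and gather it back.
 */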
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
                            u8 token)
{
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_mailbox *next;
        int copy;

        if (!to || !from)
                return -ENOMEM;

        copy = min_t(int, size, sizeof(to->first.data));
        memcpy(to->first.data, from, copy);
        size -= copy;
        from += copy;

        next = to->next;
        while (size) {
                if (!next) {
                        /* this is a BUG */
                        return -ENOMEM;
                }

                copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
                block = next->buf;
                memcpy(block->data, from, copy);
                from += copy;
                size -= copy;
                block->token = token;
                next = next->next;
        }

        return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_mailbox *next;
        int copy;

        if (!to || !from)
                return -ENOMEM;

        copy = min_t(int, size, sizeof(from->first.data));
        memcpy(to, from->first.data, copy);
        size -= copy;
        to += copy;

        next = from->next;
        while (size) {
                if (!next) {
                        /* this is a BUG */
                        return -ENOMEM;
                }

                copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
                block = next->buf;

                memcpy(to, block->data, copy);
                to += copy;
                size -= copy;
                next = next->next;
        }

        return 0;
}

static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
                                              gfp_t flags)
{
        struct mlx5_cmd_mailbox *mailbox;

        mailbox = kmalloc(sizeof(*mailbox), flags);
        if (!mailbox)
                return ERR_PTR(-ENOMEM);

        mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags,
                                       &mailbox->dma);
        if (!mailbox->buf) {
                mlx5_core_dbg(dev, "failed allocation\n");
                kfree(mailbox);
                return ERR_PTR(-ENOMEM);
        }
        mailbox->next = NULL;

        return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
                         struct mlx5_cmd_mailbox *mailbox)
{
        dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
        kfree(mailbox);
}

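/*
 * Build a command message of the given size: the list of mailbox blocks
 * is assembled back to front so each block can point at its successor's
 * DMA address, with block_num counting up from the head.
 */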
1322 static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
1323                                                gfp_t flags, int size,
1324                                                u8 token)
1325 {
1326         struct mlx5_cmd_mailbox *tmp, *head = NULL;
1327         struct mlx5_cmd_prot_block *block;
1328         struct mlx5_cmd_msg *msg;
1329         int err;
1330         int n;
1331         int i;
1332
1333         msg = kzalloc(sizeof(*msg), flags);
1334         if (!msg)
1335                 return ERR_PTR(-ENOMEM);
1336
1337         msg->len = size;
1338         n = mlx5_calc_cmd_blocks(msg);
1339
1340         for (i = 0; i < n; i++) {
1341                 tmp = alloc_cmd_box(dev, flags);
1342                 if (IS_ERR(tmp)) {
1343                         mlx5_core_warn(dev, "failed allocating block\n");
1344                         err = PTR_ERR(tmp);
1345                         goto err_alloc;
1346                 }
1347
1348                 block = tmp->buf;
1349                 tmp->next = head;
1350                 block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
1351                 block->block_num = cpu_to_be32(n - i - 1);
1352                 block->token = token;
1353                 head = tmp;
1354         }
1355         msg->next = head;
1356         return msg;
1357
1358 err_alloc:
1359         while (head) {
1360                 tmp = head->next;
1361                 free_cmd_box(dev, head);
1362                 head = tmp;
1363         }
1364         kfree(msg);
1365
1366         return ERR_PTR(err);
1367 }
1368
1369 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
1370                               struct mlx5_cmd_msg *msg)
1371 {
1372         struct mlx5_cmd_mailbox *head = msg->next;
1373         struct mlx5_cmd_mailbox *next;
1374
1375         while (head) {
1376                 next = head->next;
1377                 free_cmd_box(dev, head);
1378                 head = next;
1379         }
1380         kfree(msg);
1381 }
1382
1383 static ssize_t data_write(struct file *filp, const char __user *buf,
1384                           size_t count, loff_t *pos)
1385 {
1386         struct mlx5_core_dev *dev = filp->private_data;
1387         struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1388         void *ptr;
1389
1390         if (*pos != 0)
1391                 return -EINVAL;
1392
1393         kfree(dbg->in_msg);
1394         dbg->in_msg = NULL;
1395         dbg->inlen = 0;
1396         ptr = memdup_user(buf, count);
1397         if (IS_ERR(ptr))
1398                 return PTR_ERR(ptr);
1399         dbg->in_msg = ptr;
1400         dbg->inlen = count;
1401
1402         *pos = count;
1403
1404         return count;
1405 }
1406
1407 static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
1408                          loff_t *pos)
1409 {
1410         struct mlx5_core_dev *dev = filp->private_data;
1411         struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1412
1413         if (!dbg->out_msg)
1414                 return -ENOMEM;
1415
1416         return simple_read_from_buffer(buf, count, pos, dbg->out_msg,
1417                                        dbg->outlen);
1418 }
1419
1420 static const struct file_operations dfops = {
1421         .owner  = THIS_MODULE,
1422         .open   = simple_open,
1423         .write  = data_write,
1424         .read   = data_read,
1425 };
1426
1427 static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
1428                            loff_t *pos)
1429 {
1430         struct mlx5_core_dev *dev = filp->private_data;
1431         struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1432         char outlen[8];
1433         int err;
1434
1435         err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
1436         if (err < 0)
1437                 return err;
1438
1439         return simple_read_from_buffer(buf, count, pos, outlen, err);
1440 }
1441
1442 static ssize_t outlen_write(struct file *filp, const char __user *buf,
1443                             size_t count, loff_t *pos)
1444 {
1445         struct mlx5_core_dev *dev = filp->private_data;
1446         struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1447         char outlen_str[8] = {0};
1448         int outlen;
1449         void *ptr;
1450         int err;
1451
1452         if (*pos != 0 || count > 6)
1453                 return -EINVAL;
1454
1455         kfree(dbg->out_msg);
1456         dbg->out_msg = NULL;
1457         dbg->outlen = 0;
1458
1459         if (copy_from_user(outlen_str, buf, count))
1460                 return -EFAULT;
1461
1462         err = kstrtoint(outlen_str, 10, &outlen);
1463         if (err)
1464                 return err;
1465
1466         ptr = kzalloc(outlen, GFP_KERNEL);
1467         if (!ptr)
1468                 return -ENOMEM;
1469
1470         dbg->out_msg = ptr;
1471         dbg->outlen = outlen;
1472
1473         *pos = count;
1474
1475         return count;
1476 }
1477
1478 static const struct file_operations olfops = {
1479         .owner  = THIS_MODULE,
1480         .open   = simple_open,
1481         .write  = outlen_write,
1482         .read   = outlen_read,
1483 };
1484
1485 static void set_wqname(struct mlx5_core_dev *dev)
1486 {
1487         struct mlx5_cmd *cmd = &dev->cmd;
1488
1489         snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
1490                  dev_name(dev->device));
1491 }
1492
1493 static void clean_debug_files(struct mlx5_core_dev *dev)
1494 {
1495         struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1496
1497         if (!mlx5_debugfs_root)
1498                 return;
1499
1500         mlx5_cmdif_debugfs_cleanup(dev);
1501         debugfs_remove_recursive(dbg->dbg_root);
1502 }
1503
1504 static void create_debugfs_files(struct mlx5_core_dev *dev)
1505 {
1506         struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1507
1508         dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
1509
1510         debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
1511         debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
1512         debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops);
1513         debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status);
1514         debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
1515
1516         mlx5_cmdif_debugfs_init(dev);
1517 }
1518
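/* Quiesce the command interface by taking every regular-command semaphore
 * slot plus the pages slot; once all are held no command can be in flight,
 * so allowed_opcode can be updated safely.
 */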
1519 void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
1520 {
1521         struct mlx5_cmd *cmd = &dev->cmd;
1522         int i;
1523
1524         for (i = 0; i < cmd->max_reg_cmds; i++)
1525                 down(&cmd->sem);
1526         down(&cmd->pages_sem);
1527
1528         cmd->allowed_opcode = opcode;
1529
1530         up(&cmd->pages_sem);
1531         for (i = 0; i < cmd->max_reg_cmds; i++)
1532                 up(&cmd->sem);
1533 }
1534
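/* Same quiesce-by-semaphore pattern as mlx5_cmd_allowed_opcode(), here used
 * to switch between polling and event-driven completion modes.
 */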
1535 static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
1536 {
1537         struct mlx5_cmd *cmd = &dev->cmd;
1538         int i;
1539
1540         for (i = 0; i < cmd->max_reg_cmds; i++)
1541                 down(&cmd->sem);
1542         down(&cmd->pages_sem);
1543
1544         cmd->mode = mode;
1545
1546         up(&cmd->pages_sem);
1547         for (i = 0; i < cmd->max_reg_cmds; i++)
1548                 up(&cmd->sem);
1549 }
1550
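/* EQ notifier callback: forwards the completion vector from a CMD EQE to
 * mlx5_cmd_comp_handler() when running in events mode.
 */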
1551 static int cmd_comp_notifier(struct notifier_block *nb,
1552                              unsigned long type, void *data)
1553 {
1554         struct mlx5_core_dev *dev;
1555         struct mlx5_cmd *cmd;
1556         struct mlx5_eqe *eqe;
1557
1558         cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb);
1559         dev = container_of(cmd, struct mlx5_core_dev, cmd);
1560         eqe = data;
1561
1562         mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
1563
1564         return NOTIFY_OK;
1565 }

1566 void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
1567 {
1568         MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
1569         mlx5_eq_notifier_register(dev, &dev->cmd.nb);
1570         mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
1571 }
1572
1573 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
1574 {
1575         mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
1576         mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
1577 }
1578
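/* Return a cached message to its parent cache, or free a one-off allocation. */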
1579 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
1580 {
1581         unsigned long flags;
1582
1583         if (msg->parent) {
1584                 spin_lock_irqsave(&msg->parent->lock, flags);
1585                 list_add_tail(&msg->list, &msg->parent->head);
1586                 spin_unlock_irqrestore(&msg->parent->lock, flags);
1587         } else {
1588                 mlx5_free_cmd_msg(dev, msg);
1589         }
1590 }
1591
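/* Completion handler shared by real FW completions and driver-triggered
 * ones (forced == true, as used by mlx5_cmd_trigger_completions()): for each
 * set bit in the vector it copies back the inline output, updates per-opcode
 * stats, and then either runs the async callback or completes the
 * synchronous waiter.
 */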
1592 static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
1593 {
1594         struct mlx5_cmd *cmd = &dev->cmd;
1595         struct mlx5_cmd_work_ent *ent;
1596         mlx5_cmd_cbk_t callback;
1597         void *context;
1598         int err;
1599         int i;
1600         s64 ds;
1601         struct mlx5_cmd_stats *stats;
1602         unsigned long flags;
1603         unsigned long vector;
1604
1605         /* the low dword of the vector carries one completion bit per command entry (at most 32) */
1606         vector = vec & 0xffffffff;
1607         for (i = 0; i < (1 << cmd->log_sz); i++) {
1608                 if (test_bit(i, &vector)) {
1609                         ent = cmd->ent_arr[i];
1610
1611                         /* if we already completed the command, ignore it */
1612                         if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
1613                                                 &ent->state)) {
1614                                 /* only real completion can free the cmd slot */
1615                                 if (!forced) {
1616                                         mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
1617                                                       ent->idx);
1618                                         cmd_ent_put(ent);
1619                                 }
1620                                 continue;
1621                         }
1622
1623                         if (ent->callback && cancel_delayed_work(&ent->cb_timeout_work))
1624                                 cmd_ent_put(ent); /* timeout work was canceled */
1625
1626                         if (!forced || /* Real FW completion */
1627                             pci_channel_offline(dev->pdev) || /* FW is inaccessible */
1628                             dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
1629                                 cmd_ent_put(ent);
1630
1631                         ent->ts2 = ktime_get_ns();
1632                         memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
1633                         dump_command(dev, ent, 0);
1634                         if (!ent->ret) {
1635                                 if (!cmd->checksum_disabled)
1636                                         ent->ret = verify_signature(ent);
1637                                 else
1638                                         ent->ret = 0;
1639                                 if (vec & MLX5_TRIGGERED_CMD_COMP)
1640                                         ent->status = MLX5_DRIVER_STATUS_ABORTED;
1641                                 else
1642                                         ent->status = ent->lay->status_own >> 1;
1643
1644                                 mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
1645                                               ent->ret, deliv_status_to_str(ent->status), ent->status);
1646                         }
1647
1648                         if (ent->callback) {
1649                                 ds = ent->ts2 - ent->ts1;
1650                                 if (ent->op < MLX5_CMD_OP_MAX) {
1651                                         stats = &cmd->stats[ent->op];
1652                                         spin_lock_irqsave(&stats->lock, flags);
1653                                         stats->sum += ds;
1654                                         ++stats->n;
1655                                         spin_unlock_irqrestore(&stats->lock, flags);
1656                                 }
1657
1658                                 callback = ent->callback;
1659                                 context = ent->context;
1660                                 err = ent->ret;
1661                                 if (!err) {
1662                                         err = mlx5_copy_from_msg(ent->uout,
1663                                                                  ent->out,
1664                                                                  ent->uout_size);
1665
1666                                         err = err ? err : mlx5_cmd_check(dev,
1667                                                                         ent->in->first.data,
1668                                                                         ent->uout);
1669                                 }
1670
1671                                 mlx5_free_cmd_msg(dev, ent->out);
1672                                 free_msg(dev, ent->in);
1673
1674                                 err = err ? err : ent->status;
1675                                 /* final consumer is done, release ent */
1676                                 cmd_ent_put(ent);
1677                                 callback(err, context);
1678                         } else {
1679                                 /* release wait_func() so mlx5_cmd_invoke()
1680                                  * can make the final ent_put()
1681                                  */
1682                                 complete(&ent->done);
1683                         }
1684                 }
1685         }
1686 }
1687
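/* Complete, on behalf of the FW, every command entry that is currently
 * allocated; used when the FW is not expected to answer anymore (see
 * mlx5_cmd_flush()).
 */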
1688 void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
1689 {
1690         struct mlx5_cmd *cmd = &dev->cmd;
1691         unsigned long bitmask;
1692         unsigned long flags;
1693         u64 vector;
1694         int i;
1695
1696         /* wait for pending handlers to complete */
1697         mlx5_eq_synchronize_cmd_irq(dev);
1698         spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
1699         vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
1700         if (!vector)
1701                 goto no_trig;
1702
1703         bitmask = vector;
1704         /* We must increment the refcount of allocated entries before triggering
1705          * the completions, to guarantee pending commands are not freed in the
1706          * meantime. For that reason, it also has to be done under alloc_lock.
1707          */
1708         for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
1709                 cmd_ent_get(cmd->ent_arr[i]);
1710         vector |= MLX5_TRIGGERED_CMD_COMP;
1711         spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
1712
1713         mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
1714         mlx5_cmd_comp_handler(dev, vector, true);
1715         for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
1716                 cmd_ent_put(cmd->ent_arr[i]);
1717         return;
1718
1719 no_trig:
1720         spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
1721 }
1722
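/* Drain the command interface: keep triggering forced completions until
 * every semaphore slot can be taken, which guarantees no command is left
 * pending, then release all slots to unlock the cmdif again.
 */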
1723 void mlx5_cmd_flush(struct mlx5_core_dev *dev)
1724 {
1725         struct mlx5_cmd *cmd = &dev->cmd;
1726         int i;
1727
1728         for (i = 0; i < cmd->max_reg_cmds; i++)
1729                 while (down_trylock(&cmd->sem))
1730                         mlx5_cmd_trigger_completions(dev);
1731
1732         while (down_trylock(&cmd->pages_sem))
1733                 mlx5_cmd_trigger_completions(dev);
1734
1735         /* Unlock cmdif */
1736         up(&cmd->pages_sem);
1737         for (i = 0; i < cmd->max_reg_cmds; i++)
1738                 up(&cmd->sem);
1739 }
1740
1741 static int status_to_err(u8 status)
1742 {
1743         switch (status) {
1744         case MLX5_CMD_DELIVERY_STAT_OK:
1745         case MLX5_DRIVER_STATUS_ABORTED:
1746                 return 0;
1747         case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
1748         case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
1749                 return -EBADR;
1750         case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
1751         case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
1752         case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
1753                 return -EFAULT; /* Bad address */
1754         case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
1755         case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
1756         case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
1757         case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
1758                 return -ENOMSG;
1759         case MLX5_CMD_DELIVERY_STAT_FW_ERR:
1760                 return -EIO;
1761         default:
1762                 return -EINVAL;
1763         }
1764 }
1765
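/* Pick an input message from the smallest cache whose entries fit in_size;
 * tiny requests (<= 16 bytes) and cache misses fall back to a fresh
 * allocation.
 */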
1766 static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
1767                                       gfp_t gfp)
1768 {
1769         struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
1770         struct cmd_msg_cache *ch = NULL;
1771         struct mlx5_cmd *cmd = &dev->cmd;
1772         int i;
1773
1774         if (in_size <= 16)
1775                 goto cache_miss;
1776
1777         for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
1778                 ch = &cmd->cache[i];
1779                 if (in_size > ch->max_inbox_size)
1780                         continue;
1781                 spin_lock_irq(&ch->lock);
1782                 if (list_empty(&ch->head)) {
1783                         spin_unlock_irq(&ch->lock);
1784                         continue;
1785                 }
1786                 msg = list_entry(ch->head.next, typeof(*msg), list);
1787                 /* Cached mailboxes keep their maximum capacity; record the
1788                  * actual request size explicitly.
1789                  */
1790                 msg->len = in_size;
1791                 list_del(&msg->list);
1792                 spin_unlock_irq(&ch->lock);
1793                 break;
1794         }
1795
1796         if (!IS_ERR(msg))
1797                 return msg;
1798
1799 cache_miss:
1800         msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
1801         return msg;
1802 }
1803
1804 static int is_manage_pages(void *in)
1805 {
1806         return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
1807 }
1808
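/* Common execution path for all exec variants: rejects commands while the
 * cmdif is down or the opcode is not allowed, copies the caller's buffer
 * into mailbox messages, invokes the command, maps the delivery status to
 * an errno and copies the output back. On the async (callback) path the
 * messages are freed by the completion handler instead of here.
 */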
1809 static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
1810                     int out_size, mlx5_cmd_cbk_t callback, void *context,
1811                     bool force_polling)
1812 {
1813         struct mlx5_cmd_msg *inb;
1814         struct mlx5_cmd_msg *outb;
1815         int pages_queue;
1816         gfp_t gfp;
1817         int err;
1818         u8 status = 0;
1819         u32 drv_synd;
1820         u16 opcode;
1821         u8 token;
1822
1823         opcode = MLX5_GET(mbox_in, in, opcode);
1824         if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) {
1825                 err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
1826                 MLX5_SET(mbox_out, out, status, status);
1827                 MLX5_SET(mbox_out, out, syndrome, drv_synd);
1828                 return err;
1829         }
1830
1831         pages_queue = is_manage_pages(in);
1832         gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
1833
1834         inb = alloc_msg(dev, in_size, gfp);
1835         if (IS_ERR(inb)) {
1836                 err = PTR_ERR(inb);
1837                 return err;
1838         }
1839
1840         token = alloc_token(&dev->cmd);
1841
1842         err = mlx5_copy_to_msg(inb, in, in_size, token);
1843         if (err) {
1844                 mlx5_core_warn(dev, "err %d\n", err);
1845                 goto out_in;
1846         }
1847
1848         outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
1849         if (IS_ERR(outb)) {
1850                 err = PTR_ERR(outb);
1851                 goto out_in;
1852         }
1853
1854         err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
1855                               pages_queue, &status, token, force_polling);
1856         if (err)
1857                 goto out_out;
1858
1859         mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
1860         if (status) {
1861                 err = status_to_err(status);
1862                 goto out_out;
1863         }
1864
1865         if (!callback)
1866                 err = mlx5_copy_from_msg(out, outb, out_size);
1867
1868 out_out:
1869         if (!callback)
1870                 mlx5_free_cmd_msg(dev, outb);
1871
1872 out_in:
1873         if (!callback)
1874                 free_msg(dev, inb);
1875         return err;
1876 }
1877
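/* Minimal synchronous usage sketch (ENABLE_HCA chosen only for
 * illustration; any command defined in mlx5_ifc.h works the same way):
 *
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
 *	int err;
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 */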
1878 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
1879                   int out_size)
1880 {
1881         int err;
1882
1883         err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
1884         return err ? : mlx5_cmd_check(dev, in, out);
1885 }
1886 EXPORT_SYMBOL(mlx5_cmd_exec);
1887
1888 void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
1889                              struct mlx5_async_ctx *ctx)
1890 {
1891         ctx->dev = dev;
1892         /* Starts at 1 to avoid doing wake_up if we are not cleaning up */
1893         atomic_set(&ctx->num_inflight, 1);
1894         init_waitqueue_head(&ctx->wait);
1895 }
1896 EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
1897
1898 /**
1899  * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
1900  * @ctx: The ctx to clean
1901  *
1902  * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
1903  * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
1904  * the call to mlx5_cmd_cleanup_async_ctx().
1905  */
1906 void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
1907 {
1908         atomic_dec(&ctx->num_inflight);
1909         wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
1910 }
1911 EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
1912
1913 static void mlx5_cmd_exec_cb_handler(int status, void *_work)
1914 {
1915         struct mlx5_async_work *work = _work;
1916         struct mlx5_async_ctx *ctx = work->ctx;
1917
1918         work->user_callback(status, work);
1919         if (atomic_dec_and_test(&ctx->num_inflight))
1920                 wake_up(&ctx->wait);
1921 }
1922
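/* Issue a command asynchronously against an initialized mlx5_async_ctx.
 * num_inflight starts at 1 (see mlx5_cmd_init_async_ctx()), so the
 * atomic_inc_not_zero() below fails only if the ctx is already being torn
 * down. Rough usage sketch (my_callback and my_work are caller-defined, and
 * my_work must stay alive until the callback runs):
 *
 *	mlx5_cmd_init_async_ctx(dev, &ctx);
 *	err = mlx5_cmd_exec_cb(&ctx, in, sizeof(in), out, sizeof(out),
 *			       my_callback, &my_work);
 *	...
 *	mlx5_cmd_cleanup_async_ctx(&ctx);
 */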
1923 int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
1924                      void *out, int out_size, mlx5_async_cbk_t callback,
1925                      struct mlx5_async_work *work)
1926 {
1927         int ret;
1928
1929         work->ctx = ctx;
1930         work->user_callback = callback;
1931         if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
1932                 return -EIO;
1933         ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
1934                        mlx5_cmd_exec_cb_handler, work, false);
1935         if (ret && atomic_dec_and_test(&ctx->num_inflight))
1936                 wake_up(&ctx->wait);
1937
1938         return ret;
1939 }
1940 EXPORT_SYMBOL(mlx5_cmd_exec_cb);
1941
1942 int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
1943                           void *out, int out_size)
1944 {
1945         int err;
1946
1947         err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
1948
1949         return err ? : mlx5_cmd_check(dev, in, out);
1950 }
1951 EXPORT_SYMBOL(mlx5_cmd_exec_polling);
1952
1953 static void destroy_msg_cache(struct mlx5_core_dev *dev)
1954 {
1955         struct cmd_msg_cache *ch;
1956         struct mlx5_cmd_msg *msg;
1957         struct mlx5_cmd_msg *n;
1958         int i;
1959
1960         for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
1961                 ch = &dev->cmd.cache[i];
1962                 list_for_each_entry_safe(msg, n, &ch->head, list) {
1963                         list_del(&msg->list);
1964                         mlx5_free_cmd_msg(dev, msg);
1965                 }
1966         }
1967 }
1968
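/* Five message caches, from many small entries to a few large ones; per the
 * two tables below, each entry is sized as 16 bytes plus a number of
 * MLX5_CMD_DATA_BLOCK_SIZE mailbox blocks.
 */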
1969 static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
1970         512, 32, 16, 8, 2
1971 };
1972
1973 static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
1974         16 + MLX5_CMD_DATA_BLOCK_SIZE,
1975         16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
1976         16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
1977         16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
1978         16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
1979 };
1980
1981 static void create_msg_cache(struct mlx5_core_dev *dev)
1982 {
1983         struct mlx5_cmd *cmd = &dev->cmd;
1984         struct cmd_msg_cache *ch;
1985         struct mlx5_cmd_msg *msg;
1986         int i;
1987         int k;
1988
1989         /* Initialize and fill the caches with initial entries */
1990         for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
1991                 ch = &cmd->cache[k];
1992                 spin_lock_init(&ch->lock);
1993                 INIT_LIST_HEAD(&ch->head);
1994                 ch->num_ent = cmd_cache_num_ent[k];
1995                 ch->max_inbox_size = cmd_cache_ent_size[k];
1996                 for (i = 0; i < ch->num_ent; i++) {
1997                         msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
1998                                                  ch->max_inbox_size, 0);
1999                         if (IS_ERR(msg))
2000                                 break;
2001                         msg->parent = ch;
2002                         list_add_tail(&msg->list, &ch->head);
2003                 }
2004         }
2005 }
2006
2007 static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
2008 {
2009         cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE,
2010                                                 &cmd->alloc_dma, GFP_KERNEL);
2011         if (!cmd->cmd_alloc_buf)
2012                 return -ENOMEM;
2013
2014         /* make sure it is aligned to 4K */
2015         if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
2016                 cmd->cmd_buf = cmd->cmd_alloc_buf;
2017                 cmd->dma = cmd->alloc_dma;
2018                 cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
2019                 return 0;
2020         }
2021
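        /* Not naturally aligned: free the first attempt, over-allocate by
         * almost a page and align the CPU and DMA addresses by hand.
         */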
2022         dma_free_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
2023                           cmd->alloc_dma);
2024         cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev),
2025                                                 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
2026                                                 &cmd->alloc_dma, GFP_KERNEL);
2027         if (!cmd->cmd_alloc_buf)
2028                 return -ENOMEM;
2029
2030         cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
2031         cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
2032         cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
2033         return 0;
2034 }
2035
2036 static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
2037 {
2038         dma_free_coherent(mlx5_core_dma_dev(dev), cmd->alloc_size, cmd->cmd_alloc_buf,
2039                           cmd->alloc_dma);
2040 }
2041
2042 static u16 cmdif_rev(struct mlx5_core_dev *dev)
2043 {
2044         return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
2045 }
2046
2047 int mlx5_cmd_init(struct mlx5_core_dev *dev)
2048 {
2049         int size = sizeof(struct mlx5_cmd_prot_block);
2050         int align = roundup_pow_of_two(size);
2051         struct mlx5_cmd *cmd = &dev->cmd;
2052         u32 cmd_h, cmd_l;
2053         u16 cmd_if_rev;
2054         int err;
2055         int i;
2056
2057         memset(cmd, 0, sizeof(*cmd));
2058         cmd_if_rev = cmdif_rev(dev);
2059         if (cmd_if_rev != CMD_IF_REV) {
2060                 mlx5_core_err(dev,
2061                               "Driver cmdif rev(%d) differs from firmware's(%d)\n",
2062                               CMD_IF_REV, cmd_if_rev);
2063                 return -EINVAL;
2064         }
2065
2066         cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL);
2067         if (!cmd->stats)
2068                 return -ENOMEM;
2069
2070         cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
2071         if (!cmd->pool) {
2072                 err = -ENOMEM;
2073                 goto dma_pool_err;
2074         }
2075
2076         err = alloc_cmd_page(dev, cmd);
2077         if (err)
2078                 goto err_free_pool;
2079
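        /* The low byte of cmdq_addr_l_sz encodes the queue geometry:
         * bits 7:4 hold log2 of the number of entries, bits 3:0 log2 of
         * the entry stride.
         */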
2080         cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
2081         cmd->log_sz = cmd_l >> 4 & 0xf;
2082         cmd->log_stride = cmd_l & 0xf;
2083         if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
2084                 mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
2085                               1 << cmd->log_sz);
2086                 err = -EINVAL;
2087                 goto err_free_page;
2088         }
2089
2090         if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
2091                 mlx5_core_err(dev, "command queue size overflow\n");
2092                 err = -EINVAL;
2093                 goto err_free_page;
2094         }
2095
2096         cmd->state = MLX5_CMDIF_STATE_DOWN;
2097         cmd->checksum_disabled = 1;
2098         cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
2099         cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
2100
2101         cmd->cmdif_rev = cmdif_rev(dev);
2102         if (cmd->cmdif_rev > CMD_IF_REV) {
2103                 mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
2104                               CMD_IF_REV, cmd->cmdif_rev);
2105                 err = -EOPNOTSUPP;
2106                 goto err_free_page;
2107         }
2108
2109         spin_lock_init(&cmd->alloc_lock);
2110         spin_lock_init(&cmd->token_lock);
2111         for (i = 0; i < MLX5_CMD_OP_MAX; i++)
2112                 spin_lock_init(&cmd->stats[i].lock);
2113
2114         sema_init(&cmd->sem, cmd->max_reg_cmds);
2115         sema_init(&cmd->pages_sem, 1);
2116
2117         cmd_h = (u32)((u64)(cmd->dma) >> 32);
2118         cmd_l = (u32)(cmd->dma);
2119         if (cmd_l & 0xfff) {
2120                 mlx5_core_err(dev, "invalid command queue address\n");
2121                 err = -ENOMEM;
2122                 goto err_free_page;
2123         }
2124
2125         iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
2126         iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);
2127
2128         /* Make sure firmware sees the complete address before we proceed */
2129         wmb();
2130
2131         mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
2132
2133         cmd->mode = CMD_MODE_POLLING;
2134         cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;
2135
2136         create_msg_cache(dev);
2137
2138         set_wqname(dev);
2139         cmd->wq = create_singlethread_workqueue(cmd->wq_name);
2140         if (!cmd->wq) {
2141                 mlx5_core_err(dev, "failed to create command workqueue\n");
2142                 err = -ENOMEM;
2143                 goto err_cache;
2144         }
2145
2146         create_debugfs_files(dev);
2147
2148         return 0;
2149
2150 err_cache:
2151         destroy_msg_cache(dev);
2152
2153 err_free_page:
2154         free_cmd_page(dev, cmd);
2155
2156 err_free_pool:
2157         dma_pool_destroy(cmd->pool);
2158 dma_pool_err:
2159         kvfree(cmd->stats);
2160         return err;
2161 }
2162
2163 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
2164 {
2165         struct mlx5_cmd *cmd = &dev->cmd;
2166
2167         clean_debug_files(dev);
2168         destroy_workqueue(cmd->wq);
2169         destroy_msg_cache(dev);
2170         free_cmd_page(dev, cmd);
2171         dma_pool_destroy(cmd->pool);
2172         kvfree(cmd->stats);
2173 }
2174
2175 void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
2176                         enum mlx5_cmdif_state cmdif_state)
2177 {
2178         dev->cmd.state = cmdif_state;
2179 }