// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
#define GRCBASE_MCP	0xe00000

#define QED_MCP_RESP_ITER_US	10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */
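/* Both retry budgets assume the QED_MCP_RESP_ITER_US polling interval:
 * 500,000 iterations * 10 usec = 5 sec for a regular mailbox command, and
 * 50,000 iterations * 10 usec = 500 msec for an MCP_RESET.
 */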
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field), _val)
#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))
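/* The DRV_MB_RD/DRV_MB_WR helpers access a single field of the
 * 'struct public_drv_mb' mailbox that resides in device shared memory at
 * mcp_info->drv_mb_addr, using plain GRC reads and writes.
 */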
#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17
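/* A megabit is treated as 2^20 bits here, and 2^20 bits / 8 = 2^17 bytes,
 * so a left shift by 17 converts a rate in Mbit to bytes.
 */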
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;

	return true;
}
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is big-endian; force it to CPU order */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}
struct qed_mcp_cmd_elem {
	struct list_head list;
	struct qed_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};
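/* Lifecycle of a pending mailbox command: an element is added to
 * mcp_info->cmd_list (under cmd_lock) with the sequence number it expects
 * in the MFW response, marked completed once that response is read back,
 * and finally removed and freed by the thread that issued the command.
 */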
/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *
qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_mb_params *p_mb_params,
		     u16 expected_seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem;

	p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
	if (!p_cmd_elem)
		return NULL;

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);

	return p_cmd_elem;
}
/* Must be called while cmd_lock is acquired */
static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
				 struct qed_mcp_cmd_elem *p_cmd_elem)
{
	list_del(&p_cmd_elem->list);
	kfree(p_cmd_elem);
}
/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
						     u16 seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem;

	list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return NULL;
}
int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;

		kfree(p_hwfn->mcp_info->mfw_mb_cur);
		kfree(p_hwfn->mcp_info->mfw_mb_shadow);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		list_for_each_entry_safe(p_cmd_elem,
					 p_tmp,
					 &p_hwfn->mcp_info->cmd_list, list) {
			qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	}

	kfree(p_hwfn->mcp_info);
	p_hwfn->mcp_info = NULL;

	return 0;
}
/* Maximum of 1 sec to wait for the SHMEM ready indication */
#define QED_MCP_SHMEM_RDY_MAX_RETRIES	20
#define QED_MCP_SHMEM_RDY_ITER_MS	50
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
	u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base) {
		DP_NOTICE(p_hwfn,
			  "The address of the MCP scratch-pad is not configured\n");
		return -EINVAL;
	}

	p_info->public_base |= GRCBASE_MCP;

	/* Get the MFW MB address and number of supported messages */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
					    p_info->mfw_mb_addr +
					    offsetof(struct public_mfw_mb,
						     sup_msgs));

	/* The driver can notify that there was an MCP reset, and might read
	 * the SHMEM values before the MFW has completed initializing them.
	 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
	 * data ready indication.
	 */
	while (!p_info->mfw_mb_length && --cnt) {
		msleep(msec);
		p_info->mfw_mb_length =
			(u16)qed_rd(p_hwfn, p_ptt,
				    p_info->mfw_mb_addr +
				    offsetof(struct public_mfw_mb, sup_msgs));
	}

	if (!cnt) {
		DP_NOTICE(p_hwfn,
			  "Failed to get the SHMEM ready notification after %d msec\n",
			  QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
		return -EBUSY;
	}

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x, mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
	if (!p_hwfn->mcp_info)
		return -ENOMEM;
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlocks */
	spin_lock_init(&p_info->cmd_lock);
	spin_lock_init(&p_info->link_lock);

	INIT_LIST_HEAD(&p_info->cmd_list);

	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return 0;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	return 0;

err:
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}
static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use the MCP history register to check if an MCP reset occurred
	 * between init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
	int rc = 0;

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn,
			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
		return -EBUSY;
	}

	/* Ensure that only a single thread is accessing the mailbox */
	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500 msec (50 * 1000 * 10 usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}
/* Must be called while cmd_lock is acquired */
static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
{
	struct qed_mcp_cmd_elem *p_cmd_elem;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
					      struct qed_mcp_cmd_elem, list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}
/* Must be called while cmd_lock is acquired */
static int
qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params *p_mb_params;
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new unhandled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return -EAGAIN;

	p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return -EINVAL;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      offsetof(struct public_drv_mb,
					       union_data);
		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return 0;
}
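/* Matching on the sequence number embedded in fw_mb_header is what
 * distinguishes a fresh MFW response from a stale one: the MFW echoes the
 * driver's sequence number back, so anything other than the current
 * drv_mb_seq means the pending command has not been answered yet.
 */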
/* Must be called while cmd_lock is acquired */
static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_mb_params *p_mb_params,
				    u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);
	memset(&union_data, 0, sizeof(union_data));
	if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
		memcpy(&union_data, p_mb_params->p_data_src,
		       p_mb_params->data_src_size);
	qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
		      sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}
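/* Note the write ordering above: the union data and the parameter are
 * written before drv_mb_header. The header write (command | sequence
 * number) is what the MFW acts upon, so it must land last, once the rest
 * of the request is fully in place.
 */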
static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
{
	p_hwfn->mcp_info->b_block_cmd = block_cmd;

	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
		block_cmd ? "Block" : "Unblock");
}
static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
	u32 delay = QED_MCP_RESP_ITER_US;

	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	udelay(delay);
	cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	udelay(delay);
	cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}
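/* The program counter is sampled three times, QED_MCP_RESP_ITER_US apart,
 * so the log shows whether the MCP is still executing (the PC advances)
 * or is stuck at a single address when a command times out.
 */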
static int
_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       struct qed_mcp_mb_params *p_mb_params,
		       u32 max_retries, u32 usecs)
{
	u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u16 seq_num;
	int rc = 0;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (!qed_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
			msleep(msecs);
		else
			udelay(usecs);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return -EAGAIN;
	}

	/* Send the mailbox command */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = -ENOMEM;
		goto err;
	}

	__qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if
		 * the command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */

		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
			msleep(msecs);
		else
			udelay(usecs);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		qed_mcp_print_cpu_info(p_hwfn, p_ptt);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
			qed_mcp_cmd_set_blocking(p_hwfn, true);

		qed_hw_err_notify(p_hwfn, p_ptt,
				  QED_HW_ERR_MFW_RESP_FAIL, NULL);
		return -EAGAIN;
	}

	qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp,
		   p_mb_params->mcp_param,
		   (cnt * usecs) / 1000, (cnt * usecs) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return 0;

err:
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}
static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_mcp_mb_params *p_mb_params)
{
	size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
	u32 usecs = QED_MCP_RESP_ITER_US;

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn,
			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return -EBUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size,
		       p_mb_params->data_dst_size, union_data_size);
		return -EINVAL;
	}

	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
		max_retries = DIV_ROUND_UP(max_retries, 1000);
		usecs *= 1000;
	}

	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
				      usecs);
}
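/* With CAN_SLEEP the retry budget is rescaled from 500,000 polls of
 * 10 usec each to 500 sleeps of 10 msec each, preserving the overall
 * 5 sec timeout while letting the caller sleep instead of busy-waiting.
 */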
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param, u32 *o_mcp_resp, u32 *o_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return 0;
}
int
qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   u32 cmd,
		   u32 param,
		   u32 *o_mcp_resp,
		   u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	/* nvm_info needs to be updated */
	p_hwfn->nvm_info.valid = false;

	return 0;
}
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
	struct qed_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	memcpy(o_buf, raw_data, *o_txn_size);

	return 0;
}
static bool
qed_mcp_can_force_load(u8 drv_role,
		       u8 exist_drv_role,
		       enum qed_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case QED_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}
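/* Without an explicit override, a force load is allowed only when it is
 * safe by construction: an OS driver may displace a preboot driver, and a
 * kdump driver may displace the OS driver it is dumping.
 */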
static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			 &resp, &param);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}
#define CONFIG_QEDE_BITMAP_IDX		BIT(0)
#define CONFIG_QED_SRIOV_BITMAP_IDX	BIT(1)
#define CONFIG_QEDR_BITMAP_IDX		BIT(2)
#define CONFIG_QEDF_BITMAP_IDX		BIT(4)
#define CONFIG_QEDI_BITMAP_IDX		BIT(5)
#define CONFIG_QED_LL2_BITMAP_IDX	BIT(6)
static u32 qed_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

	if (IS_ENABLED(CONFIG_QEDE))
		config_bitmap |= CONFIG_QEDE_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_SRIOV))
		config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_RDMA))
		config_bitmap |= CONFIG_QEDR_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_FCOE))
		config_bitmap |= CONFIG_QEDF_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_ISCSI))
		config_bitmap |= CONFIG_QEDI_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_LL2))
		config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;

	return config_bitmap;
}
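/* The resulting bitmap is reported to the MFW as the drv_ver_1 field of
 * the load request below, so the management firmware knows which qed
 * sub-modules were compiled into this kernel.
 */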
struct qed_load_req_in_params {
	u8 hsi_ver;
#define QED_LOAD_REQ_HSI_VER_DEFAULT	0
#define QED_LOAD_REQ_HSI_VER_1		1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct qed_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};
static int
__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   struct qed_load_req_in_params *p_in_params,
		   struct qed_load_req_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	int rc;

	memset(&load_req, 0, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
			  p_in_params->timeout_val);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
			  p_in_params->force_cmd);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
			  p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0,
			   load_req.drv_ver_1,
			   load_req.fw_ver,
			   load_req.misc0,
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   QED_MFW_GET_FIELD(load_req.misc0,
					     LOAD_REQ_LOCK_TO),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
	}

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0,
			   load_rsp.drv_ver_1,
			   load_rsp.fw_ver,
			   load_rsp.misc0,
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
		    LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return 0;
}
static int qed_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
				enum qed_drv_role drv_role,
				u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case QED_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case QED_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
		return -EINVAL;
	}

	return 0;
}
enum qed_load_req_force {
	QED_LOAD_REQ_FORCE_NONE,
	QED_LOAD_REQ_FORCE_PF,
	QED_LOAD_REQ_FORCE_ALL,
};
static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
				  enum qed_load_req_force force_cmd,
				  u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case QED_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case QED_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case QED_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	}
}
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     struct qed_load_req_params *p_params)
{
	struct qed_load_req_out_params out_params;
	struct qed_load_req_in_params in_params;
	u8 mfw_drv_role, mfw_force_cmd;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_1 = qed_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	rc = qed_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
	if (rc)
		return rc;

	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	qed_get_mfw_force_cmd(p_hwfn,
			      QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);

	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	memset(&out_params, 0, sizeof(out_params));
	rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");

		in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
		memset(&out_params, 0, sizeof(out_params));
		rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
		if (rc)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (qed_mcp_can_force_load(in_params.drv_role,
					   out_params.exist_drv_role,
					   p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			qed_get_mfw_force_cmd(p_hwfn,
					      QED_LOAD_REQ_FORCE_ALL,
					      &mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			memset(&out_params, 0, sizeof(out_params));
			rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
						&out_params);
			if (rc)
				return rc;
		} else {
			DP_NOTICE(p_hwfn,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);
			DP_NOTICE(p_hwfn,
				  "Avoid sending a force load request to prevent disruption of active PFs\n");

			qed_mcp_cancel_load_req(p_hwfn, p_ptt);
			return -EBUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 */
			DP_NOTICE(p_hwfn, "PF is already loaded\n");
			return -EINVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return -EBUSY;
	}

	p_params->load_code = out_params.load_code;

	return 0;
}
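/* The accepted load_code tells the caller which initialization scope it
 * owns: LOAD_ENGINE (first PF on the engine), LOAD_PORT (first PF on the
 * port), or LOAD_FUNCTION (engine and port are already initialized).
 */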
int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
			 &param);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
		return rc;
	}

	/* Check if there is a DID mismatch between nvm-cfg/efuse */
	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
		DP_NOTICE(p_hwfn,
			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");

	return 0;
}
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	u32 wol_param;

	switch (p_hwfn->cdev->wol_config) {
	case QED_OV_WOL_DISABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown WoL configuration %02x\n",
			  p_hwfn->cdev->wol_config);
		fallthrough;
	case QED_OV_WOL_DEFAULT:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
	mb_params.param = wol_param;
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	struct mcp_mac wol_mac;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	/* Set the primary MAC if WoL is enabled */
	if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
		u8 *p_mac = p_hwfn->cdev->wol_mac;

		memset(&wol_mac, 0, sizeof(wol_mac));
		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
				    p_mac[4] << 8 | p_mac[5];

		DP_VERBOSE(p_hwfn,
			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
			   p_mac, wol_mac.mac_upper, wol_mac.mac_lower);

		mb_params.p_data_src = &wol_mac;
		mb_params.data_src_size = sizeof(wol_mac);
	}

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
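/* Example of the WoL MAC packing above: for aa:bb:cc:dd:ee:ff, mac_upper
 * becomes 0x0000aabb and mac_lower 0xccddeeff, i.e. the address is laid
 * out big-endian across the two SHMEM words.
 */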
static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     QED_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
					 path_addr +
					 offsetof(struct public_path,
						  mcp_vf_disabled) +
					 sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}
int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct qed_mcp_mb_params mb_params;
	int rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
		return -EBUSY;
	}

	/* Clear the ACK bits */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		qed_wr(p_hwfn, p_ptt,
		       func_addr +
		       offsetof(struct public_func, drv_ack_vf_disabled) +
		       i * sizeof(u32), 0);

	return rc;
}
static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = qed_rd(p_hwfn, p_ptt,
				   p_hwfn->mcp_info->port_addr +
				   offsetof(struct public_port,
					    transceiver_data));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_HW | QED_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state,
		   (u32)(p_hwfn->mcp_info->port_addr +
			 offsetof(struct public_port, transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state,
				      ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}
static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = qed_rd(p_hwfn,
			    p_ptt,
			    p_hwfn->mcp_info->port_addr +
			    offsetof(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= QED_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= QED_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
}
static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct public_func *p_data, int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr;
	u32 i, size;

	func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	memset(p_data, 0, sizeof(*p_data));

	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
					    func_addr + (i << 2));
	return size;
}
static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
				  struct public_func *p_shmem_info)
{
	struct qed_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
						  FUNC_MF_CFG_MIN_BW);
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
						  FUNC_MF_CFG_MAX_BW);
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}
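/* Both limits are percentages of the link speed, so values outside 1-100
 * coming from the MFW are treated as misconfiguration and clamped to the
 * permissive end of the range.
 */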
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	spin_lock_bh(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port, link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}
	if (p_hwfn->b_drv_link_init) {
		/* Link indication with modern MFWs arrives as a per-PF
		 * virtual link.
		 */
		if (p_hwfn->mcp_info->capabilities &
		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
			struct public_func shmem_info;

			qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
					       MCP_PF_ID(p_hwfn));
			p_link->link_up = !!(shmem_info.status &
					     FUNC_STATUS_VIRTUAL_LINK_UP);
			qed_read_pf_bandwidth(p_hwfn, &shmem_info);
			DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
				   "Virtual link_up = %d\n", p_link->link_up);
		} else {
			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
			DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
				   "Physical link_up = %d\n", p_link->link_up);
		}
	} else {
		p_link->link_up = false;
	}
	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		fallthrough;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
		p_link->link_up = 0;
	}

	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;
	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
					    p_link->min_pf_rate);
	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	if (p_hwfn->mcp_info->capabilities &
	    FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
		switch (status & LINK_STATUS_FEC_MODE_MASK) {
		case LINK_STATUS_FEC_MODE_NONE:
			p_link->fec_active = QED_FEC_MODE_NONE;
			break;
		case LINK_STATUS_FEC_MODE_FIRECODE_CL74:
			p_link->fec_active = QED_FEC_MODE_FIRECODE;
			break;
		case LINK_STATUS_FEC_MODE_RS_CL91:
			p_link->fec_active = QED_FEC_MODE_RS;
			break;
		default:
			p_link->fec_active = QED_FEC_MODE_AUTO;
		}
	} else {
		p_link->fec_active = QED_FEC_MODE_UNSUPPORTED;
	}

	qed_link_update(p_hwfn, p_ptt);
out:
	spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
}
int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct qed_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	u32 cmd, fec_bit = 0;
	u32 val, ext_speed;
	int rc = 0;

	/* Set the shmem configuration according to params */
	memset(&phy_cfg, 0, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;
	/* There are MFWs that share this capability regardless of whether
	 * this is feasible or not. And given that at the very least adv_caps
	 * would be set internally by qed, we want to make sure LFA would
	 * still work.
	 */
	if ((p_hwfn->mcp_info->capabilities &
	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & QED_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & QED_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
				   EEE_TX_TIMER_USEC_MASK;
	}
	if (p_hwfn->mcp_info->capabilities &
	    FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
		if (params->fec & QED_FEC_MODE_NONE)
			fec_bit |= FEC_FORCE_MODE_NONE;
		else if (params->fec & QED_FEC_MODE_FIRECODE)
			fec_bit |= FEC_FORCE_MODE_FIRECODE;
		else if (params->fec & QED_FEC_MODE_RS)
			fec_bit |= FEC_FORCE_MODE_RS;
		else if (params->fec & QED_FEC_MODE_AUTO)
			fec_bit |= FEC_FORCE_MODE_AUTO;

		SET_MFW_FIELD(phy_cfg.fec_mode, FEC_FORCE_MODE, fec_bit);
	}
	if (p_hwfn->mcp_info->capabilities &
	    FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
		ext_speed = 0;
		if (params->ext_speed.autoneg)
			ext_speed |= ETH_EXT_SPEED_AN;

		val = params->ext_speed.forced_speed;
		if (val & QED_EXT_SPEED_1G)
			ext_speed |= ETH_EXT_SPEED_1G;
		if (val & QED_EXT_SPEED_10G)
			ext_speed |= ETH_EXT_SPEED_10G;
		if (val & QED_EXT_SPEED_20G)
			ext_speed |= ETH_EXT_SPEED_20G;
		if (val & QED_EXT_SPEED_25G)
			ext_speed |= ETH_EXT_SPEED_25G;
		if (val & QED_EXT_SPEED_40G)
			ext_speed |= ETH_EXT_SPEED_40G;
		if (val & QED_EXT_SPEED_50G_R)
			ext_speed |= ETH_EXT_SPEED_50G_BASE_R;
		if (val & QED_EXT_SPEED_50G_R2)
			ext_speed |= ETH_EXT_SPEED_50G_BASE_R2;
		if (val & QED_EXT_SPEED_100G_R2)
			ext_speed |= ETH_EXT_SPEED_100G_BASE_R2;
		if (val & QED_EXT_SPEED_100G_R4)
			ext_speed |= ETH_EXT_SPEED_100G_BASE_R4;
		if (val & QED_EXT_SPEED_100G_P4)
			ext_speed |= ETH_EXT_SPEED_100G_BASE_P4;

		SET_MFW_FIELD(phy_cfg.extended_speed, ETH_EXT_SPEED,
			      ext_speed);

		ext_speed = 0;
		val = params->ext_speed.advertised_speeds;
		if (val & QED_EXT_SPEED_MASK_1G)
			ext_speed |= ETH_EXT_ADV_SPEED_1G;
		if (val & QED_EXT_SPEED_MASK_10G)
			ext_speed |= ETH_EXT_ADV_SPEED_10G;
		if (val & QED_EXT_SPEED_MASK_20G)
			ext_speed |= ETH_EXT_ADV_SPEED_20G;
		if (val & QED_EXT_SPEED_MASK_25G)
			ext_speed |= ETH_EXT_ADV_SPEED_25G;
		if (val & QED_EXT_SPEED_MASK_40G)
			ext_speed |= ETH_EXT_ADV_SPEED_40G;
		if (val & QED_EXT_SPEED_MASK_50G_R)
			ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R;
		if (val & QED_EXT_SPEED_MASK_50G_R2)
			ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R2;
		if (val & QED_EXT_SPEED_MASK_100G_R2)
			ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R2;
		if (val & QED_EXT_SPEED_MASK_100G_R4)
			ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R4;
		if (val & QED_EXT_SPEED_MASK_100G_P4)
			ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_P4;

		phy_cfg.extended_speed |= ext_speed;

		SET_MFW_FIELD(phy_cfg.fec_mode, FEC_EXTENDED_MODE,
			      params->ext_fec_mode);
	}
	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, Adv. Speed 0x%08x, Loopback 0x%08x, FEC 0x%08x, Ext. Speed 0x%08x\n",
			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
			   phy_cfg.loopback_mode, phy_cfg.fec_mode,
			   phy_cfg.extended_speed);
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n");

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* If the MCP fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return 0;
}
u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn));

	proc_kill_cnt = qed_rd(p_hwfn, p_ptt,
			       path_addr +
			       offsetof(struct public_path, process_kill)) &
			PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}
static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and till its load phase, during which they will be re-enabled.
	 */
	qed_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != QED_LEADING_HWFN(cdev))
		return;

	if (cdev->recov_in_prog) {
		DP_NOTICE(p_hwfn,
			  "Ignoring the indication since a recovery process is already in progress\n");
		return;
	}

	cdev->recov_in_prog = true;

	proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt);

	qed_schedule_recovery_handler(p_hwfn);
}
static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					enum MFW_DRV_MSG_TYPE type)
{
	enum qed_mcp_protocol_type stats_type;
	union qed_mcp_protocol_stats stats;
	struct qed_mcp_mb_params mb_params;
	u32 hsi_param;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = QED_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = QED_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = QED_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = QED_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
		return;
	}

	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		    &param);
}
static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
					    FUNC_MF_CFG_OV_STAG_MASK;
	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
	if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
		if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
			       p_hwfn->hw_info.ovlan);
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);

			/* Configure DB to add external vlan to EDPM packets */
			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
			       p_hwfn->hw_info.ovlan);
		} else {
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
		}

		qed_sp_pf_update_stag(p_hwfn);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
		   p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
		    &resp, &param);
}
static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt)
{
	/* A single notification should be sent to upper driver in CMT mode */
	if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
		return;

	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL,
			  "Fan failure was detected on the network interface card and it's going to be shut down.\n");
}
struct qed_mdump_cmd_params {
	u32 cmd;
	void *p_data_src;
	u8 data_src_size;
	void *p_data_dst;
	u8 data_dst_size;
	u32 mcp_resp;
};

static int
qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt,
		  struct qed_mdump_cmd_params *p_mdump_cmd_params)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
	mb_params.param = p_mdump_cmd_params->cmd;
	mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
	mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
	mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
	mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;

	if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
		DP_INFO(p_hwfn,
			"The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
			p_mdump_cmd_params->cmd);
		rc = -EOPNOTSUPP;
	} else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The mdump command is not supported by the MFW\n");
		rc = -EOPNOTSUPP;
	}

	return rc;
}
static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mdump_cmd_params mdump_cmd_params;

	memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;

	return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
static int
qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct mdump_retain_data_stc *p_mdump_retain)
{
	struct qed_mdump_cmd_params mdump_cmd_params;
	int rc;

	memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
	mdump_cmd_params.p_data_dst = p_mdump_retain;
	mdump_cmd_params.data_dst_size = sizeof(*p_mdump_retain);

	rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
	if (rc)
		return rc;

	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_INFO(p_hwfn,
			"Failed to get the mdump retained data [mcp_resp 0x%x]\n",
			mdump_cmd_params.mcp_resp);
		return -EINVAL;
	}

	return 0;
}
static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt)
{
	struct mdump_retain_data_stc mdump_retain;
	int rc;

	/* In CMT mode - no need for more than a single acknowledgment to the
	 * MFW, and no more than a single notification to the upper driver.
	 */
	if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
		return;

	rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
	if (rc == 0 && mdump_retain.valid)
		DP_NOTICE(p_hwfn,
			  "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
			  mdump_retain.epoch,
			  mdump_retain.pf, mdump_retain.status);
	else
		DP_NOTICE(p_hwfn,
			  "The MFW notified that a critical error occurred in the device\n");

	DP_NOTICE(p_hwfn,
		  "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
	qed_mcp_mdump_ack(p_hwfn, p_ptt);

	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL);
}
void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 port_cfg, val;

	if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
		return;

	memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
	port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			  offsetof(struct public_port, oem_cfg_port));
	val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
	      OEM_CFG_CHANNEL_TYPE_OFFSET;
	if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
		DP_NOTICE(p_hwfn,
			  "Incorrect UFP Channel type %d port_id 0x%02x\n",
			  val, MFW_PORT(p_hwfn));

	val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
	if (val == OEM_CFG_SCHED_TYPE_ETS) {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
	} else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
	} else {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
		DP_NOTICE(p_hwfn,
			  "Unknown UFP scheduling mode %d port_id 0x%02x\n",
			  val, MFW_PORT(p_hwfn));
	}

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >>
	      OEM_CFG_FUNC_TC_OFFSET;
	p_hwfn->ufp_info.tc = (u8)val;
	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
	      OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
	if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
	} else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
	} else {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
		DP_NOTICE(p_hwfn,
			  "Unknown Host priority control %d port_id 0x%02x\n",
			  val, MFW_PORT(p_hwfn));
	}

	DP_NOTICE(p_hwfn,
		  "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n",
		  p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
		  p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn));
}
static int
qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_mcp_read_ufp_config(p_hwfn, p_ptt);

	if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
		p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
		qed_hw_info_set_offload_tc(&p_hwfn->hw_info,
					   p_hwfn->ufp_info.tc);

		qed_qm_reconf(p_hwfn, p_ptt);
	} else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
		/* Merge UFP TC with the dcbx TC data */
		qed_dcbx_mib_update_event(p_hwfn, p_ptt,
					  QED_DCBX_OPERATIONAL_MIB);
	} else {
		DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
		return -EINVAL;
	}

	/* Update storm FW with negotiation results */
	qed_sp_pf_update_ufp(p_hwfn);

	/* Update stag pcp value */
	qed_sp_pf_update_stag(p_hwfn);

	return 0;
}
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_OEM_CFG_UPDATE:
			qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_ERROR_RECOVERY:
			qed_mcp_handle_process_kill(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_S_TAG_UPDATE:
			qed_mcp_update_stag(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_FAILURE_DETECTED:
			qed_mcp_handle_fan_failure(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
			qed_mcp_handle_critical_error(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_TLV_REQ:
			qed_mfw_tlv_req(p_hwfn);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* The MFW expects the answer in BE, so force the write into
		 * that format.
		 */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
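/* The cur/shadow buffer pair implements edge detection on the MFW mailbox:
 * only dwords that differ from the previously acked copy are treated as new
 * messages, and the shadow is refreshed once every change has been acked.
 */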

int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
	u32 global_offsize;

	if (IS_VF(p_hwfn->cdev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return 0;
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return -EINVAL;
		}
	}

	global_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_hwfn->
						     mcp_info->public_base,
						     PUBLIC_GLOBAL));
	*p_mfw_ver =
	    qed_rd(p_hwfn, p_ptt,
		   SECTION_ADDR(global_offsize,
				0) + offsetof(struct public_global, mfw_ver));

	if (p_running_bundle_id != NULL) {
		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
					      SECTION_ADDR(global_offsize, 0) +
					      offsetof(struct public_global,
						       running_bundle_id));
	}

	return 0;
}
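
/* Illustrative decode (not part of the driver): the 32-bit mfw_ver packs
 * four version bytes, major.minor.rev.eng, from MSB to LSB. Assuming a
 * caller-owned 'buf' of sufficient size, it can be pretty-printed as:
 *
 *	u32 mfw_ver;
 *
 *	if (!qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &mfw_ver, NULL))
 *		snprintf(buf, sizeof(buf), "%d.%d.%d.%d",
 *			 (mfw_ver >> 24) & 0xff, (mfw_ver >> 16) & 0xff,
 *			 (mfw_ver >> 8) & 0xff, mfw_ver & 0xff);
 */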

int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt, u32 *p_mbi_ver)
{
	u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	/* Read the address of the nvm_cfg */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read the offset of nvm_cfg1 */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		       offsetof(struct nvm_cfg1, glob) +
		       offsetof(struct nvm_cfg1_glob, mbi_version);
	*p_mbi_ver = qed_rd(p_hwfn, p_ptt,
			    mbi_ver_addr) &
		     (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
		      NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
		      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);

	return 0;
}

int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_media_type)
{
	*p_media_type = MEDIA_UNSPECIFIED;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (!p_ptt) {
		*p_media_type = MEDIA_UNSPECIFIED;
		return -EINVAL;
	}

	*p_media_type = qed_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->port_addr +
			       offsetof(struct public_port,
					media_type));

	return 0;
}

int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *p_transceiver_state,
				 u32 *p_transceiver_type)
{
	u32 transceiver_info;

	*p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
	*p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	transceiver_info = qed_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  offsetof(struct public_port,
					   transceiver_data));

	*p_transceiver_state = (transceiver_info &
				ETH_TRANSCEIVER_STATE_MASK) >>
				ETH_TRANSCEIVER_STATE_OFFSET;

	if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		*p_transceiver_type = (transceiver_info &
				       ETH_TRANSCEIVER_TYPE_MASK) >>
				       ETH_TRANSCEIVER_TYPE_OFFSET;
	else
		*p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;

	return 0;
}

static bool qed_is_transceiver_ready(u32 transceiver_state,
				     u32 transceiver_type)
{
	if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
	    ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
	    (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
		return true;

	return false;
}

int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u32 *p_speed_mask)
{
	u32 transceiver_type, transceiver_state;
	int ret;

	ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
					   &transceiver_type);
	if (ret)
		return ret;

	if (qed_is_transceiver_ready(transceiver_state, transceiver_type) ==
				     false)
		return -EINVAL;

	switch (transceiver_type) {
	case ETH_TRANSCEIVER_TYPE_1G_LX:
	case ETH_TRANSCEIVER_TYPE_1G_SX:
	case ETH_TRANSCEIVER_TYPE_1G_PCC:
	case ETH_TRANSCEIVER_TYPE_1G_ACC:
	case ETH_TRANSCEIVER_TYPE_1000BASET:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;
	case ETH_TRANSCEIVER_TYPE_10G_SR:
	case ETH_TRANSCEIVER_TYPE_10G_LR:
	case ETH_TRANSCEIVER_TYPE_10G_LRM:
	case ETH_TRANSCEIVER_TYPE_10G_ER:
	case ETH_TRANSCEIVER_TYPE_10G_PCC:
	case ETH_TRANSCEIVER_TYPE_10G_ACC:
	case ETH_TRANSCEIVER_TYPE_4x10G:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		break;
	case ETH_TRANSCEIVER_TYPE_40G_LR4:
	case ETH_TRANSCEIVER_TYPE_40G_SR4:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		break;
	case ETH_TRANSCEIVER_TYPE_100G_AOC:
	case ETH_TRANSCEIVER_TYPE_100G_SR4:
	case ETH_TRANSCEIVER_TYPE_100G_LR4:
	case ETH_TRANSCEIVER_TYPE_100G_ER4:
	case ETH_TRANSCEIVER_TYPE_100G_ACC:
		*p_speed_mask =
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		break;
	case ETH_TRANSCEIVER_TYPE_25G_SR:
	case ETH_TRANSCEIVER_TYPE_25G_LR:
	case ETH_TRANSCEIVER_TYPE_25G_AOC:
	case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
	case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
	case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		break;
	case ETH_TRANSCEIVER_TYPE_25G_CA_N:
	case ETH_TRANSCEIVER_TYPE_25G_CA_S:
	case ETH_TRANSCEIVER_TYPE_25G_CA_L:
	case ETH_TRANSCEIVER_TYPE_4x25G_CR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		break;
	case ETH_TRANSCEIVER_TYPE_40G_CR4:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;
	case ETH_TRANSCEIVER_TYPE_100G_CR4:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
		*p_speed_mask =
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
		*p_speed_mask =
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		break;
	case ETH_TRANSCEIVER_TYPE_XLPPI:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		break;
	case ETH_TRANSCEIVER_TYPE_10G_BASET:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;
	default:
		DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
			transceiver_type);
		*p_speed_mask = 0xff;
		break;
	}

	return 0;
}
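
/* Illustrative use (not part of the driver): link-config code would
 * typically AND this transceiver-derived mask with the NVM-configured
 * port capabilities before building the advertised speed set, e.g.:
 *
 *	u32 speed_mask;
 *
 *	if (!qed_mcp_trans_speed_mask(p_hwfn, p_ptt, &speed_mask))
 *		allowed = nvm_port_caps & speed_mask;
 *
 * where 'allowed' and 'nvm_port_caps' are hypothetical locals.
 */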

int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u32 *p_board_config)
{
	u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}
	if (!p_ptt) {
		*p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
		return -EINVAL;
	}

	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	*p_board_config = qed_rd(p_hwfn, p_ptt,
				 port_cfg_addr +
				 offsetof(struct nvm_cfg1_port,
					  board_cfg));

	return 0;
}

/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
			       enum qed_pci_personality *p_proto)
{
	/* There wasn't ever a legacy MFW that published iwarp.
	 * So at this point, this is either plain l2 or RoCE.
	 */
	if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
		*p_proto = QED_PCI_ETH_ROCE;
	else
		*p_proto = QED_PCI_ETH;

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "According to Legacy capabilities, L2 personality is %08x\n",
		   (u32)*p_proto);
}

static int
qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    enum qed_pci_personality *p_proto)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
	if (rc)
		return rc;
	if (resp != FW_MSG_CODE_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
			   "MFW lacks support for command; Returns %08x\n",
			   resp);
		return -EINVAL;
	}

	switch (param) {
	case FW_MB_PARAM_GET_PF_RDMA_NONE:
		*p_proto = QED_PCI_ETH;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_ROCE:
		*p_proto = QED_PCI_ETH_ROCE;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_IWARP:
		*p_proto = QED_PCI_ETH_IWARP;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_BOTH:
		*p_proto = QED_PCI_ETH_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
			  param);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_IFUP,
		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
		   (u32)*p_proto, resp, param);

	return 0;
}

static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
			struct public_func *p_info,
			struct qed_ptt *p_ptt,
			enum qed_pci_personality *p_proto)
{
	int rc = 0;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (!IS_ENABLED(CONFIG_QED_RDMA))
			*p_proto = QED_PCI_ETH;
		else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
			qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
		break;
	case FUNC_MF_CFG_PROTOCOL_ISCSI:
		*p_proto = QED_PCI_ISCSI;
		break;
	case FUNC_MF_CFG_PROTOCOL_NVMETCP:
		*p_proto = QED_PCI_NVMETCP;
		break;
	case FUNC_MF_CFG_PROTOCOL_FCOE:
		*p_proto = QED_PCI_FCOE;
		break;
	case FUNC_MF_CFG_PROTOCOL_ROCE:
		DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
		fallthrough;
	default:
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
				    &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);

		/* Store primary MAC for later possible WoL */
		memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
			 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
			 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	info->mtu = (u16)shmem_info.mtu_size;

	p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
	p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
	if (qed_mcp_is_init(p_hwfn)) {
		u32 resp = 0, param = 0;
		int rc;

		rc = qed_mcp_cmd(p_hwfn, p_ptt,
				 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
		if (rc)
			return rc;
		if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
			p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
	}

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %pM wwn port %llx node %llx ovlan %04x wol %02x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac,
		   info->wwn_port, info->wwn_node,
		   info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);

	return 0;
}

struct qed_mcp_link_params
*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct qed_mcp_link_state
*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_output;
}

struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	msleep(1020);

	return rc;
}

int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_flash_size)
{
	u32 flash_size;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		      MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return 0;
}

int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;

	if (cdev->recov_in_prog) {
		DP_NOTICE(p_hwfn,
			  "Avoid triggering a recovery since such a process is already in progress\n");
		return -EAGAIN;
	}

	DP_NOTICE(p_hwfn, "Triggering a recovery process\n");
	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);

	return 0;
}

#define QED_RECOVERY_PROLOG_SLEEP_MS 100

int qed_recovery_prolog(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
	int rc;

	/* Allow ongoing PCIe transactions to complete */
	msleep(QED_RECOVERY_PROLOG_SLEEP_MS);

	/* Clear the PF's internal FID_enable in the PXP */
	rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
			  rc);

	return rc;
}

static int
qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	int rc;

	/* Only Leader can configure MSIX, and need to take CMT into account */
	if (!IS_LEAD_HWFN(p_hwfn))
		return 0;
	num *= p_hwfn->cdev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			 &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}

static int
qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 num)
{
	u32 resp = 0, param = num, rc_param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
			 param, &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts for VFs\n", num);
	}

	return rc;
}

int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	if (QED_IS_BB(p_hwfn->cdev))
		return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
	else
		return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
}
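
/* Illustrative call (not part of the driver): during SR-IOV enable, the
 * PF would request one status block per VF queue, e.g. for a hypothetical
 * 'num_queues' value:
 *
 *	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf_id, num_queues);
 *
 * On BB the count is scaled internally by the number of engines, as seen
 * in qed_mcp_config_vf_msix_bb() above.
 */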

int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
{
	struct qed_mcp_mb_params mb_params;
	struct drv_version_stc drv_version;
	__be32 val;
	u32 i;
	int rc;

	memset(&drv_version, 0, sizeof(drv_version));
	drv_version.version = p_ver->version;
	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
		*(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &drv_version;
	mb_params.data_src_size = sizeof(drv_version);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
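
/* Illustrative caller (not part of the driver): the version string is
 * bounded by MCP_DRV_VER_STR_SIZE and sent once at load time. The numeric
 * encoding below is a hypothetical example value:
 *
 *	struct qed_mcp_drv_version drv_version;
 *
 *	drv_version.version = 0x08042500;	// hypothetical packed value
 *	strscpy(drv_version.name, "qed example",
 *		MCP_DRV_VER_STR_SIZE - 4);
 *	rc = qed_mcp_send_drv_version(p_hwfn, p_ptt, &drv_version);
 */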

/* A maximal 100 msec waiting time for the MCP to halt */
#define QED_MCP_HALT_SLEEP_MS 10
#define QED_MCP_HALT_MAX_RETRIES 10

int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0, cpu_state, cnt = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			 &param);
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	do {
		msleep(QED_MCP_HALT_SLEEP_MS);
		cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
			break;
	} while (++cnt < QED_MCP_HALT_MAX_RETRIES);

	if (cnt == QED_MCP_HALT_MAX_RETRIES) {
		DP_NOTICE(p_hwfn,
			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
		return -EBUSY;
	}

	qed_mcp_cmd_set_blocking(p_hwfn, true);

	return 0;
}

#define QED_MCP_RESUME_SLEEP_MS 10

int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state;

	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
	msleep(QED_MCP_RESUME_SLEEP_MS);
	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);

	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
		DP_NOTICE(p_hwfn,
			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  cpu_mode, cpu_state);
		return -EBUSY;
	}

	qed_mcp_cmd_set_blocking(p_hwfn, false);

	return 0;
}
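
/* Illustrative pairing (not part of the driver): halt/resume bracket
 * operations that must run while the MCP is quiesced, e.g.:
 *
 *	rc = qed_mcp_halt(p_hwfn, p_ptt);
 *	if (!rc) {
 *		...			// e.g. direct NVM/register access
 *		rc = qed_mcp_resume(p_hwfn, p_ptt);
 *	}
 *
 * While halted, mailbox commands go through the blocking path selected by
 * qed_mcp_cmd_set_blocking() above.
 */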

int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     enum qed_ov_client client)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (client) {
	case QED_OV_CLIENT_DRV:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
		break;
	case QED_OV_CLIENT_USER:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
		break;
	case QED_OV_CLIENT_VENDOR_SPEC:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   enum qed_ov_driver_state drv_state)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (drv_state) {
	case QED_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case QED_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case QED_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send driver state\n");

	return rc;
}

int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u16 mtu)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);

	return rc;
}

int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 *mac)
{
	struct qed_mcp_mb_params mb_params;
	u32 mfw_mac[2];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
			  DRV_MSG_CODE_VMAC_TYPE_SHIFT;
	mb_params.param |= MCP_PF_ID(p_hwfn);

	/* MCP is BE, and on LE platforms PCI would swap access to SHMEM
	 * in 32-bit granularity.
	 * So the MAC has to be set in native order [and not byte order],
	 * otherwise it would be read incorrectly by MFW after swap.
	 */
	mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
	mfw_mac[1] = mac[4] << 24 | mac[5] << 16;

	mb_params.p_data_src = (u8 *)mfw_mac;
	mb_params.data_src_size = 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);

	/* Store primary MAC for later possible WoL */
	memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);

	return rc;
}
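
/* Worked example of the packing above: for MAC 00:11:22:33:44:55 the
 * resulting native-order words are mfw_mac[0] = 0x00112233 and
 * mfw_mac[1] = 0x44550000, so the MFW reads the bytes back in wire order
 * regardless of the host's endianness.
 */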

int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, enum qed_ov_wol wol)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Can't change WoL configuration when WoL isn't supported\n");
		return -EINVAL;
	}

	switch (wol) {
	case QED_OV_WOL_DEFAULT:
		drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
		break;
	case QED_OV_WOL_DISABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);

	/* Store the WoL update for a future unload */
	p_hwfn->cdev->wol_config = (u8)wol;

	return rc;
}

int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      enum qed_ov_eswitch eswitch)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (eswitch) {
	case QED_OV_ESWITCH_NONE:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
		break;
	case QED_OV_ESWITCH_VEB:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
		break;
	case QED_OV_ESWITCH_VEPA:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);

	return rc;
}

int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	int rc;

	switch (mode) {
	case QED_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case QED_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case QED_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			 drv_mb_param, &resp, &param);

	return rc;
}

int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u32 mask_parities)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			 mask_parities, &resp, &param);

	if (rc) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not acknowledge mask parity request. Old MFW?\n");
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
{
	u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u32 resp = 0, resp_param = 0;
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	while (bytes_left > 0) {
		bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);

		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					DRV_MSG_CODE_NVM_READ_NVRAM,
					addr + offset +
					(bytes_to_copy <<
					 DRV_MB_PARAM_NVM_LEN_OFFSET),
					&resp, &resp_param,
					&read_len,
					(u32 *)(p_buf + offset));

		if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
			break;
		}

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't preemptable. Sleep a bit to prevent CPU hogging.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - read_len) % 0x1000)
			usleep_range(1000, 2000);

		offset += read_len;
		bytes_left -= read_len;
	}

	cdev->mcp_nvm_resp = resp;
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}
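
/* Illustrative caller (not part of the driver): reading the first 256
 * bytes of flash after querying its size; 'buf' is a hypothetical,
 * sufficiently large caller-owned buffer:
 *
 *	u32 flash_size;
 *
 *	if (!qed_mcp_get_flash_size(p_hwfn, p_ptt, &flash_size) &&
 *	    flash_size >= 256)
 *		rc = qed_mcp_nvm_read(cdev, 0, buf, 256);
 */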

int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp));
	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

int qed_mcp_nvm_write(struct qed_dev *cdev,
		      u32 cmd, u32 addr, u8 *p_buf, u32 len)
{
	u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	switch (cmd) {
	case QED_PUT_FILE_BEGIN:
		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
		break;
	case QED_PUT_FILE_DATA:
		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
		break;
	case QED_NVM_WRITE_NVRAM:
		nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd);
		rc = -EINVAL;
		goto out;
	}

	buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
	while (buf_idx < len) {
		if (cmd == QED_PUT_FILE_BEGIN)
			nvm_offset = addr;
		else
			nvm_offset = ((buf_size <<
				       DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) +
				       buf_idx;
		rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
					&resp, &param, buf_size,
					(u32 *)&p_buf[buf_idx]);
		if (rc) {
			DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc);
			resp = FW_MSG_CODE_ERROR;
			break;
		}

		if (resp != FW_MSG_CODE_OK &&
		    resp != FW_MSG_CODE_NVM_OK &&
		    resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
			DP_NOTICE(cdev,
				  "nvm write failed, resp = 0x%08x\n", resp);
			rc = -EINVAL;
			break;
		}

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't pre-emptable. Sleep a bit to prevent CPU hogging.
		 */
		if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
			usleep_range(1000, 2000);

		/* For MBI upgrade, MFW response includes the next buffer offset
		 * to be delivered to MFW.
		 */
		if (param && cmd == QED_PUT_FILE_DATA) {
			buf_idx = QED_MFW_GET_FIELD(param,
					FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
			buf_size = QED_MFW_GET_FIELD(param,
					 FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
		} else {
			buf_idx += buf_size;
			buf_size = min_t(u32, (len - buf_idx),
					 MCP_DRV_NVM_BUF_LEN);
		}
	}

	cdev->mcp_nvm_resp = resp;
out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf)
{
	u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0;
	u32 resp, param;
	int rc;

	nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
		       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
	nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
		       DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;

	addr = offset;
	offset = 0;
	bytes_left = len;
	while (bytes_left > 0) {
		bytes_to_copy = min_t(u32, bytes_left,
				      MAX_I2C_TRANSACTION_SIZE);
		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		nvm_offset |= ((addr + offset) <<
			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
		nvm_offset |= (bytes_to_copy <<
			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
			       DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					DRV_MSG_CODE_TRANSCEIVER_READ,
					nvm_offset, &resp, &param, &buf_size,
					(u32 *)(p_buf + offset));
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to send a transceiver read command to the MFW. rc = %d.\n",
				  rc);
			return rc;
		}

		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
			return -ENODEV;
		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return -EINVAL;

		offset += buf_size;
		bytes_left -= buf_size;
	}

	return 0;
}
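
/* Illustrative caller (not part of the driver): fetching the first SFP
 * EEPROM page, as an ethtool module-EEPROM dump would. I2C address 0xa0
 * is the standard SFP identification page; 'buf' is a hypothetical
 * 256-byte caller-owned buffer:
 *
 *	rc = qed_mcp_phy_sfp_read(p_hwfn, p_ptt, MFW_PORT(p_hwfn),
 *				  0xa0, 0, 256, buf);
 */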

int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	int rc;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);

	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	int rc;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);

	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u32 *num_images)
{
	u32 drv_mb_param = 0, rsp;
	int rc;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, num_images);
	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
		rc = -EINVAL;

	return rc;
}

int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct bist_nvm_image_att *p_image_att,
				   u32 image_index)
{
	u32 buf_size = 0, param, resp = 0, resp_param = 0;
	int rc;

	param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
		DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
	param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;

	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				DRV_MSG_CODE_BIST_TEST, param,
				&resp, &resp_param,
				&buf_size,
				(u32 *)p_image_att);
	if (rc)
		return rc;

	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = -EIO;

	return rc;
}

int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
{
	struct qed_nvm_image_info nvm_info;
	struct qed_ptt *p_ptt;
	int rc;
	u32 i;

	if (p_hwfn->nvm_info.valid)
		return 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "failed to acquire ptt\n");
		return -EBUSY;
	}

	/* Acquire from MFW the amount of available images */
	nvm_info.num_images = 0;
	rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
					     p_ptt, &nvm_info.num_images);
	if (rc == -EOPNOTSUPP) {
		DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
		goto out;
	} else if (rc || !nvm_info.num_images) {
		DP_ERR(p_hwfn, "Failed getting number of images\n");
		goto err0;
	}

	nvm_info.image_att = kmalloc_array(nvm_info.num_images,
					   sizeof(struct bist_nvm_image_att),
					   GFP_KERNEL);
	if (!nvm_info.image_att) {
		rc = -ENOMEM;
		goto err0;
	}

	/* Iterate over images and get their attributes */
	for (i = 0; i < nvm_info.num_images; i++) {
		rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
						    &nvm_info.image_att[i], i);
		if (rc) {
			DP_ERR(p_hwfn,
			       "Failed getting image index %d attributes\n", i);
			goto err1;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
			   nvm_info.image_att[i].len);
	}

out:
	/* Update hwfn's nvm_info */
	if (nvm_info.num_images) {
		p_hwfn->nvm_info.num_images = nvm_info.num_images;
		kfree(p_hwfn->nvm_info.image_att);
		p_hwfn->nvm_info.image_att = nvm_info.image_att;
		p_hwfn->nvm_info.valid = true;
	}

	qed_ptt_release(p_hwfn, p_ptt);
	return 0;

err1:
	kfree(nvm_info.image_att);
err0:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->nvm_info.image_att);
	p_hwfn->nvm_info.image_att = NULL;
	p_hwfn->nvm_info.valid = false;
}

int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
			  enum qed_nvm_images image_id,
			  struct qed_nvm_image_att *p_image_att)
{
	enum nvm_image_type type;
	u32 i;

	/* Translate image_id into MFW definitions */
	switch (image_id) {
	case QED_NVM_IMAGE_ISCSI_CFG:
		type = NVM_TYPE_ISCSI_CFG;
		break;
	case QED_NVM_IMAGE_FCOE_CFG:
		type = NVM_TYPE_FCOE_CFG;
		break;
	case QED_NVM_IMAGE_MDUMP:
		type = NVM_TYPE_MDUMP;
		break;
	case QED_NVM_IMAGE_NVM_CFG1:
		type = NVM_TYPE_NVM_CFG1;
		break;
	case QED_NVM_IMAGE_DEFAULT_CFG:
		type = NVM_TYPE_DEFAULT_CFG;
		break;
	case QED_NVM_IMAGE_NVM_META:
		type = NVM_TYPE_META;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
			  image_id);
		return -EINVAL;
	}

	qed_mcp_nvm_info_populate(p_hwfn);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
			   "Failed to find nvram image of type %08x\n",
			   image_id);
		return -ENOENT;
	}

	p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	p_image_att->length = p_hwfn->nvm_info.image_att[i].len;

	return 0;
}

int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
			  enum qed_nvm_images image_id,
			  u8 *p_buffer, u32 buffer_len)
{
	struct qed_nvm_image_att image_att;
	int rc;

	memset(p_buffer, 0, buffer_len);

	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
	if (rc)
		return rc;

	/* Validate sizes - both the image's and the supplied buffer's */
	if (image_att.length <= 4) {
		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
			   "Image [%d] is too small - only %d bytes\n",
			   image_id, image_att.length);
		return -EINVAL;
	}

	if (image_att.length > buffer_len) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_STORAGE,
			   "Image [%d] is too big - %08x bytes where only %08x are available\n",
			   image_id, image_att.length, buffer_len);
		return -ENOMEM;
	}

	return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
				p_buffer, image_att.length);
}
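
/* Illustrative caller (not part of the driver): fetching the NVM metadata
 * image into a caller-owned buffer of a hypothetical size 'META_MAX':
 *
 *	u8 *buf = kzalloc(META_MAX, GFP_KERNEL);
 *
 *	if (buf &&
 *	    !qed_mcp_get_nvm_image(p_hwfn, QED_NVM_IMAGE_NVM_META,
 *				   buf, META_MAX))
 *		...			// parse the image
 */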

static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
{
	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;

	switch (res_id) {
	case QED_SB:
		mfw_res_id = RESOURCE_NUM_SB_E;
		break;
	case QED_L2_QUEUE:
		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
		break;
	case QED_VPORT:
		mfw_res_id = RESOURCE_NUM_VPORT_E;
		break;
	case QED_RSS_ENG:
		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
		break;
	case QED_PQ:
		mfw_res_id = RESOURCE_NUM_PQ_E;
		break;
	case QED_RL:
		mfw_res_id = RESOURCE_NUM_RL_E;
		break;
	case QED_MAC:
	case QED_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		mfw_res_id = RESOURCE_VFC_FILTER_E;
		break;
	case QED_ILT:
		mfw_res_id = RESOURCE_ILT_E;
		break;
	case QED_LL2_RAM_QUEUE:
		mfw_res_id = RESOURCE_LL2_QUEUE_E;
		break;
	case QED_LL2_CTX_QUEUE:
		mfw_res_id = RESOURCE_LL2_CQS_E;
		break;
	case QED_RDMA_CNQ_RAM:
	case QED_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		mfw_res_id = RESOURCE_CQS_E;
		break;
	case QED_RDMA_STATS_QUEUE:
		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
		break;
	case QED_BDQ:
		mfw_res_id = RESOURCE_BDQ_E;
		break;
	default:
		break;
	}

	return mfw_res_id;
}

#define QED_RESC_ALLOC_VERSION_MAJOR 2
#define QED_RESC_ALLOC_VERSION_MINOR 0
#define QED_RESC_ALLOC_VERSION				     \
	((QED_RESC_ALLOC_VERSION_MAJOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
	 (QED_RESC_ALLOC_VERSION_MINOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))

struct qed_resc_alloc_in_params {
	u32 cmd;
	enum qed_resources res_id;
	u32 resc_max_val;
};

struct qed_resc_alloc_out_params {
	u32 mcp_resp;
	u32 mcp_param;
	u32 resc_num;
	u32 resc_start;
	u32 vf_resc_num;
	u32 vf_resc_start;
	u32 flags;
};

static int
qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct qed_resc_alloc_in_params *p_in_params,
			    struct qed_resc_alloc_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct resource_info mfw_resc_info;
	int rc;

	memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));

	mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
		DP_ERR(p_hwfn,
		       "Failed to match resource %d [%s] with the MFW resources\n",
		       p_in_params->res_id,
		       qed_hw_get_resc_name(p_in_params->res_id));
		return -EINVAL;
	}

	switch (p_in_params->cmd) {
	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
		mfw_resc_info.size = p_in_params->resc_max_val;
		break;
	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
		       p_in_params->cmd);
		return -EINVAL;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = p_in_params->cmd;
	mb_params.param = QED_RESC_ALLOC_VERSION;
	mb_params.p_data_src = &mfw_resc_info;
	mb_params.data_src_size = sizeof(mfw_resc_info);
	mb_params.p_data_dst = mb_params.p_data_src;
	mb_params.data_dst_size = mb_params.data_src_size;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
		   p_in_params->cmd,
		   p_in_params->res_id,
		   qed_hw_get_resc_name(p_in_params->res_id),
		   QED_MFW_GET_FIELD(mb_params.param,
				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   QED_MFW_GET_FIELD(mb_params.param,
				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_in_params->resc_max_val);

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	p_out_params->mcp_resp = mb_params.mcp_resp;
	p_out_params->mcp_param = mb_params.mcp_param;
	p_out_params->resc_num = mfw_resc_info.size;
	p_out_params->resc_start = mfw_resc_info.offset;
	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
	p_out_params->flags = mfw_resc_info.flags;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_out_params->resc_num,
		   p_out_params->resc_start,
		   p_out_params->vf_resc_num,
		   p_out_params->vf_resc_start, p_out_params->flags);

	return 0;
}

int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 enum qed_resources res_id,
			 u32 resc_max_val, u32 *p_mcp_resp)
{
	struct qed_resc_alloc_out_params out_params;
	struct qed_resc_alloc_in_params in_params;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
	in_params.res_id = res_id;
	in_params.resc_max_val = resc_max_val;
	memset(&out_params, 0, sizeof(out_params));
	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					 &out_params);
	if (rc)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	return 0;
}

int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum qed_resources res_id,
		      u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
{
	struct qed_resc_alloc_out_params out_params;
	struct qed_resc_alloc_in_params in_params;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	in_params.res_id = res_id;
	memset(&out_params, 0, sizeof(out_params));
	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					 &out_params);
	if (rc)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
		*p_resc_num = out_params.resc_num;
		*p_resc_start = out_params.resc_start;
	}

	return 0;
}

int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param;

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
			   &mcp_resp, &mcp_param);
}

static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
{
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
			 p_mcp_resp, p_mcp_param);
	if (rc)
		return rc;

	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The resource command is unsupported by the MFW\n");
		return -EINVAL;
	}

	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
		u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);

		DP_NOTICE(p_hwfn,
			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
			  param, opcode);
		return -EINVAL;
	}

	return rc;
}

static int
__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_lock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	int rc;

	switch (p_params->timeout) {
	case QED_MCP_RESC_LOCK_TO_DEFAULT:
		opcode = RESOURCE_OPCODE_REQ;
		p_params->timeout = 0;
		break;
	case QED_MCP_RESC_LOCK_TO_NONE:
		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
		p_params->timeout = 0;
		break;
	default:
		opcode = RESOURCE_OPCODE_REQ_W_AGING;
		break;
	}

	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
		   param, p_params->timeout, opcode, p_params->resource);

	/* Attempt to acquire the resource */
	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* Analyze the response */
	p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
		   mcp_param, opcode, p_params->owner);

	switch (opcode) {
	case RESOURCE_OPCODE_GNT:
		p_params->b_granted = true;
		break;
	case RESOURCE_OPCODE_BUSY:
		p_params->b_granted = false;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return -EINVAL;
	}

	return 0;
}

int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
{
	u32 retry_cnt = 0;
	int rc;

	do {
		/* No need for an interval before the first iteration */
		if (retry_cnt) {
			if (p_params->sleep_b4_retry) {
				u16 retry_interval_in_ms =
				    DIV_ROUND_UP(p_params->retry_interval,
						 1000);

				msleep(retry_interval_in_ms);
			} else {
				udelay(p_params->retry_interval);
			}
		}

		rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
		if (rc)
			return rc;

		if (p_params->b_granted)
			break;
	} while (retry_cnt++ < p_params->retry_num);

	return 0;
}

int
qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_unlock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	int rc;

	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
				   : RESOURCE_OPCODE_RELEASE;
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
		   param, opcode, p_params->resource);

	/* Attempt to release the resource */
	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* Analyze the response */
	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
		   mcp_param, opcode);

	switch (opcode) {
	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
		DP_INFO(p_hwfn,
			"Resource unlock request for an already released resource [%d]\n",
			p_params->resource);
		fallthrough;
	case RESOURCE_OPCODE_RELEASED:
		p_params->b_released = true;
		break;
	case RESOURCE_OPCODE_WRONG_OWNER:
		p_params->b_released = false;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return -EINVAL;
	}

	return 0;
}

void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
				    struct qed_resc_unlock_params *p_unlock,
				    enum qed_resc_lock
				    resource, bool b_is_permanent)
{
	if (p_lock) {
		memset(p_lock, 0, sizeof(*p_lock));

		/* Permanent resources don't require aging, and there's no
		 * point in trying to acquire them more than once since it's
		 * unexpected another entity would release them.
		 */
		if (b_is_permanent) {
			p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
		} else {
			p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
			p_lock->retry_interval =
			    QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
			p_lock->sleep_b4_retry = true;
		}

		p_lock->resource = resource;
	}

	if (p_unlock) {
		memset(p_unlock, 0, sizeof(*p_unlock));
		p_unlock->resource = resource;
	}
}
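
/* Illustrative lock/unlock round trip (not part of the driver), for a
 * hypothetical 'resc_id' taken from enum qed_resc_lock:
 *
 *	struct qed_resc_lock_params lock;
 *	struct qed_resc_unlock_params unlock;
 *
 *	qed_mcp_resc_lock_default_init(&lock, &unlock, resc_id, false);
 *	if (!qed_mcp_resc_lock(p_hwfn, p_ptt, &lock) && lock.b_granted) {
 *		...			// critical section across PFs
 *		qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
 *	}
 */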

bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn)
{
	return !!(p_hwfn->mcp_info->capabilities &
		  FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
}

int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
			 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
	if (!rc)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
			   "MFW supported features: %08x\n",
			   p_hwfn->mcp_info->capabilities);

	return rc;
}

int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param, features;

	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
		   DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK |
		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL;

	if (QED_IS_E5(p_hwfn->cdev))
		features |=
		    DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL;

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
			   features, &mcp_resp, &mcp_param);
}

int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params = {0};
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 fir_valid, l2_valid;
	int rc;

	mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The get_engine_config command is unsupported by the MFW\n");
		return -EOPNOTSUPP;
	}

	fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
				      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
	if (fir_valid)
		cdev->fir_affin =
		    QED_MFW_GET_FIELD(mb_params.mcp_param,
				      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);

	l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
				     FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
	if (l2_valid)
		cdev->l2_affin_hint =
		    QED_MFW_GET_FIELD(mb_params.mcp_param,
				      FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);

	DP_INFO(p_hwfn,
		"Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
		fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint);

	return 0;
}

int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params = {0};
	struct qed_dev *cdev = p_hwfn->cdev;
	int rc;

	mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The get_ppfid_bitmap command is unsupported by the MFW\n");
		return -EOPNOTSUPP;
	}

	cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param,
					       FW_MB_PARAM_PPFID_BITMAP);

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n",
		   cdev->ppfid_bitmap);

	return 0;
}

int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
			u32 *p_len)
{
	u32 mb_param = 0, resp, param;
	int rc;

	QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
	if (flags & QED_NVM_CFG_OPTION_INIT)
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
	if (flags & QED_NVM_CFG_OPTION_FREE)
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
	if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
				  entity_id);
	}

	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				DRV_MSG_CODE_GET_NVM_CFG_OPTION,
				mb_param, &resp, &param, p_len, (u32 *)p_buf);

	return rc;
}

int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
			u32 len)
{
	u32 mb_param = 0, resp, param;

	QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
	if (flags & QED_NVM_CFG_OPTION_ALL)
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1);
	if (flags & QED_NVM_CFG_OPTION_INIT)
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
	if (flags & QED_NVM_CFG_OPTION_COMMIT)
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1);
	if (flags & QED_NVM_CFG_OPTION_FREE)
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
	if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
				  entity_id);
	}

	return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
				  DRV_MSG_CODE_SET_NVM_CFG_OPTION,
				  mb_param, &resp, &param, len, (u32 *)p_buf);
}
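
/* Illustrative read-modify-write of one NVM config option (not part of
 * the driver). The option id 'opt' and value 'val' are hypothetical; the
 * flag semantics follow the DRV_MB_PARAM_NVM_CFG_OPTION_* fields set
 * above:
 *
 *	u32 len = sizeof(val);
 *
 *	rc = qed_mcp_nvm_get_cfg(p_hwfn, p_ptt, opt, 0,
 *				 QED_NVM_CFG_OPTION_INIT, (u8 *)&val, &len);
 *	if (!rc)
 *		rc = qed_mcp_nvm_set_cfg(p_hwfn, p_ptt, opt, 0,
 *					 QED_NVM_CFG_OPTION_COMMIT |
 *					 QED_NVM_CFG_OPTION_FREE,
 *					 (u8 *)&val, len);
 */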

#define QED_MCP_DBG_DATA_MAX_SIZE		MCP_DRV_NVM_BUF_LEN
#define QED_MCP_DBG_DATA_MAX_HEADER_SIZE	sizeof(u32)
#define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \
	(QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE)

static int
__qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 *p_buf, u8 size)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	if (size > QED_MCP_DBG_DATA_MAX_SIZE) {
		DP_ERR(p_hwfn,
		       "Debug data size is %d while it should not exceed %d\n",
		       size, QED_MCP_DBG_DATA_MAX_SIZE);
		return -EINVAL;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND;
	SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size);
	mb_params.p_data_src = p_buf;
	mb_params.data_src_size = size;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The DEBUG_DATA_SEND command is unsupported by the MFW\n");
		return -EOPNOTSUPP;
	} else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) {
		DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n");
		return -EBUSY;
	} else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) {
		DP_NOTICE(p_hwfn,
			  "Failed to send debug data to the MFW [resp 0x%08x]\n",
			  mb_params.mcp_resp);
		return -EINVAL;
	}

	return 0;
}

enum qed_mcp_dbg_data_type {
	QED_MCP_DBG_DATA_TYPE_RAW,
};

/* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */
#define QED_MCP_DBG_DATA_HDR_SN_OFFSET		0
#define QED_MCP_DBG_DATA_HDR_SN_MASK		0x00000fff
#define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET	12
#define QED_MCP_DBG_DATA_HDR_TYPE_MASK		0x000ff000
#define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET	20
#define QED_MCP_DBG_DATA_HDR_FLAGS_MASK		0x0ff00000
#define QED_MCP_DBG_DATA_HDR_PF_OFFSET		28
#define QED_MCP_DBG_DATA_HDR_PF_MASK		0xf0000000

#define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST	0x1
#define QED_MCP_DBG_DATA_HDR_FLAGS_LAST		0x2

static int
qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size)
{
	u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf;
	u32 tmp_size = size, *p_header, *p_payload;
	u32 flags = 0;
	u16 seq;
	int rc;

	p_header = (u32 *)raw_data;
	p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE);

	seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq);

	/* First chunk is marked as 'first' */
	flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;

	*p_header = 0;
	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_SN, seq);
	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type);
	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id);

	while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) {
		memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE);
		rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
					       QED_MCP_DBG_DATA_MAX_SIZE);
		if (rc)
			return rc;

		/* Clear the 'first' marking after sending the first chunk */
		if (p_tmp_buf == p_buf) {
			flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
			SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS,
				      flags);
		}

		p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
		tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
	}

	/* Last chunk is marked as 'last' */
	flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST;
	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
	memcpy(p_payload, p_tmp_buf, tmp_size);

	/* Casting the left size to u8 is ok since at this point it is <= 32 */
	return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
					 (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE +
					      tmp_size));
}

int
qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, u8 *p_buf, u32 size)
{
	return qed_mcp_send_debug_data(p_hwfn, p_ptt,
				       QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
}
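
/* Illustrative caller (not part of the driver): pushing a driver log
 * buffer to the MFW for post-mortem capture; chunking into mailbox-sized
 * payloads and the sequence/flags header are handled internally:
 *
 *	rc = qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, log_buf, log_len);
 *
 * 'log_buf' and 'log_len' are hypothetical.
 */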