/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

/* driver-local headers; editor's addition of the obviously-required ones */
#include "hfi.h"
#include "trace.h"
#include "pio.h"
#include "sdma.h"

#define NUM_IB_PORTS 1
uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
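/*
 * Editor's note (illustrative, not from the original source): the same
 * arithmetic generalizes to other packet sizes as
 * timeout_ns = (packet_bytes + 64 header bytes) / 12.5 GB/s; an 8K
 * packet, for instance, gives (8192 + 64) / 12.5 = 660.48 ns.
 */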
uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */
struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
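/*
 * Illustrative sketch (editor's addition, not part of the driver): a
 * flag_table is consumed by walking its entries and testing each
 * entry's flag bit(s) against an error status register value. The
 * helper below is hypothetical and exists only to show the pattern.
 */
static inline int flag_table_num_set(const struct flag_table *table,
				     size_t len, u64 reg)
{
	size_t i;
	int n = 0;

	/* count entries whose flag bit(s) are present in reg */
	for (i = 0; i < len; i++)
		if (reg & table[i].flag)
			n++;
	return n;
}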
/* Send Error Consequences */
#define SEC_WRITE_DROPPED 0x1
#define SEC_PACKET_DROPPED 0x2
#define SEC_SC_HALTED 0x4 /* per-context only */
#define SEC_SPC_FREEZE 0x8 /* per-HFI only */

#define DEFAULT_KRCVQS 2
#define MIN_KERNEL_KCTXTS 2
#define FIRST_KERNEL_KCTXT 1
/*
 * RSM instance allocation
 *   0 - Verbs
 *   1 - User Fecn Handling
 *   2 - Vnic
 */
#define RSM_INS_VERBS 0
#define RSM_INS_FECN 1
#define RSM_INS_VNIC 2
/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
/* RSM fields for Verbs */
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull
#define QPN_WIDTH 7ull
/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull
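/*
 * Worked example (editor's note): LRH_BTH_MATCH_OFFSET evaluates to
 * (0 << 6) | 48 = 48, i.e. quad-word 0, bit offset 48. A hypothetical
 * match against header quad-word 0 would then be
 * ((qw0 >> 48) & LRH_BTH_MASK) == LRH_BTH_VALUE.
 */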
/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
/* RSM fields for Vnic */
/* L2_TYPE: QW 0, OFFSET 61 - for match */
#define L2_TYPE_QW 0ull
#define L2_TYPE_BIT_OFFSET 61ull
#define L2_TYPE_OFFSET(off) ((L2_TYPE_QW << QW_SHIFT) | (off))
#define L2_TYPE_MATCH_OFFSET L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
#define L2_TYPE_MASK 3ull
#define L2_16B_VALUE 2ull

/* L4_TYPE QW 1, OFFSET 0 - for match */
#define L4_TYPE_QW 1ull
#define L4_TYPE_BIT_OFFSET 0ull
#define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off))
#define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
#define L4_16B_TYPE_MASK 0xFFull
#define L4_16B_ETH_VALUE 0x78ull

/* 16B VESWID - for select */
#define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull))
/* 16B ENTROPY - for select */
#define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull))
/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, sc1, sc1val, sc2, sc2val, sc3, sc3val, \
	sc4, sc4val, sc5, sc5val, sc6, sc6val, sc7, sc7val) \
(	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
)
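/*
 * Usage sketch (editor's addition): a power-on SC2VL entry would be
 * built along these lines, mapping SC0..SC7 of table 0 onto
 * VLs 0,0,1,1,2,2,3,3 (the register name and mapping shown are
 * hypothetical):
 *
 *	write_csr(dd, SEND_SC2VLT0,
 *		  SC2VL_VAL(0, 0, 0, 1, 0, 2, 1, 3, 1,
 *			    4, 2, 5, 2, 6, 3, 7, 3));
 */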
#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, e1, e1val, e2, e2val, e3, e3val, \
	e4, e4val, e5, e5val, e6, e6val, e7, e7val, \
	e8, e8val, e9, e9val, e10, e10val, e11, e11val, \
	e12, e12val, e13, e13val, e14, e14val, e15, e15val) \
(	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT)   \
)
/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
		   | CCE_STATUS_RXE_FROZE_SMASK \
		   | CCE_STATUS_TXE_FROZE_SMASK \
		   | CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
		       | CCE_STATUS_TXE_PAUSED_SMASK \
		       | CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
/*
 * CCE Error flags
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CceCsrParityErr",
		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/	FLAG_ENTRY0("LATriggered",
		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/	FLAG_ENTRY0("CceIntMapCorErr",
		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/	FLAG_ENTRY0("CceIntMapUncErr",
		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK)
};
/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};
/*
 * TXE PIO Error flags and consequences
 *
 * (The consequence argument of each entry below was reconstructed from
 * the ALL_PIO_FREEZE_ERR mask that follows this table: masks listed
 * there get SEC_SPC_FREEZE, the bad-context write gets
 * SEC_WRITE_DROPPED, the remaining correctable errors get 0.)
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
	SEC_WRITE_DROPPED,
	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/	FLAG_ENTRY("PioWriteAddrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY("PioCsrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("PioSbMemFifo0",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/	FLAG_ENTRY("PioSbMemFifo1",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/	FLAG_ENTRY("PioPccFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY("PioPecFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY("PioSmPktResetParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/	FLAG_ENTRY("PioPpmcPblFifo",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/	FLAG_ENTRY("PioInitSmIn",
	0,
	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/	FLAG_ENTRY("PioHostAddrMemCor",
	0,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/	FLAG_ENTRY("PioWriteDataParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/	FLAG_ENTRY("PioStateMachine",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/	FLAG_ENTRY("PioWriteQwValidParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/	FLAG_ENTRY("PioBlockQwCountParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/	FLAG_ENTRY("PioVlfVlLenParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/	FLAG_ENTRY("PioVlfSopParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/	FLAG_ENTRY("PioVlFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY("PioPpmcSopLen",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/	FLAG_ENTRY("PioPccSopHeadParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
};
/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
/* 4-63 reserved */
};
/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};
/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/	FLAG_ENTRY0("Reserved", 0ull),
/* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};
/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))
/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};
/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("InconsistentSop",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/	FLAG_ENTRY("DisallowedPacket",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("WriteOverflow",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/	FLAG_ENTRY("WriteOutOfBounds",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/* 5-63 reserved */
};
/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};
/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};
/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		LCBE(REDUNDANT_FLIT_PARITY_ERR))
};
/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};
/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		    FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		    EXTERNAL_DEVICE_REQ_TIMEOUT),
};
/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
	FLAG_ENTRY0("BC SMA message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
	FLAG_ENTRY0("Link width downgraded", 0x0200),
};
static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
				     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
					    int msecs);
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				   int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
/*
 * Error interrupt table entry. This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt
 * registers. Second tier interrupt registers have a single bit
 * representing them in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};
#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
/*
 * Helpers for building HFI and DC error interrupt table entries. Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	  handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
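/*
 * Sketch of the "clear down" pattern these entries feed (editor's
 * addition; the real routine lives elsewhere in this file): read the
 * second-tier status CSR, write back what was seen to clear it, then
 * dispatch to the handler.
 *
 *	u64 reg = read_csr(dd, eri->status);
 *
 *	write_csr(dd, eri->clear, reg);
 *	if (eri->handler)
 *		eri->handler(dd, source, reg);
 */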
/*
 * Table of the "misc" grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/	EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1*/	EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2*/	EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3*/	{ 0, 0, 0, NULL }, /* reserved */
/* 4*/	EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5*/	EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6*/	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/	EE(SEND_ERR, handle_txe_err, "TxeErr")
	/* the rest are reserved */
};
/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4
/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
/* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
	/* rest are reserved */
};
/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register cannot be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7
/*
 * Table of the DC grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/	DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/	/* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};
/*
 * Per-counter descriptor used by the counter tables below.
 */
struct cntr_entry {
	char *name;	/* counter name */
	u64 csr;	/* csr to read for name (if applicable) */
	int offset;	/* offset into dd or ppd to store the counter's value */
	u8 flags;	/* counter flags */
	/* accessor for stat element, context either dd or ppd */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};
#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
#define CNTR_ELEM(name, csr, offset, flags, accessor) \
	{ name, csr, offset, flags, accessor }
/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)
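/*
 * Expansion example (editor's note): RXE32_PORT_CNTR_ELEM(RxLen, 4,
 * CNTR_NORMAL) becomes CNTR_ELEM("RxLen", (4 * 8 + RCV_COUNTER_ARRAY32),
 * 0, CNTR_NORMAL | CNTR_32BIT, port_access_u32_csr); the counter index
 * is scaled by the 8-byte CSR stride off the 32-bit RXE counter array.
 * (The counter name and index in this example are hypothetical.)
 */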
#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)
/* 64bit RXE */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)
#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)
/* 32bit TXE */
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

/* 64bit TXE */
#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)
#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, flags, \
	  dev_access_u64_csr)
/* CCE */
#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)
/* DC */
#define DC_PERF_CNTR(name, counter, flags) \
	CNTR_ELEM(#name, counter, 0, flags, dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
	CNTR_ELEM(#name, counter, 0, flags, dc_access_lcb_cntr)

/* ibp counters */
#define SW_IBP_CNTR(name, cntr) \
	CNTR_ELEM(#name, 0, 0, CNTR_SYNTH, access_ibp_##cntr)
/**
 * hfi1_addr_from_offset - return addr for readq/writeq
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 *
 * This routine selects the appropriate base address
 * based on the indicated offset.
 */
static inline void __iomem *hfi1_addr_from_offset(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (offset >= dd->base2_start)
		return dd->kregbase2 + (offset - dd->base2_start);
	return dd->kregbase1 + offset;
}
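/*
 * Editor's note: the chip's CSR space is mapped as two regions;
 * offsets below dd->base2_start resolve into kregbase1 and everything
 * at or above it into kregbase2. For example (offset value
 * hypothetical), read_csr(dd, dd->base2_start + 8) reads from
 * dd->kregbase2 + 8.
 */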
/**
 * read_csr - read CSR at the indicated offset
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 *
 * Return: the value read or all FF's if there
 * is no mapping
 */
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return readq(hfi1_addr_from_offset(dd, offset));
	return -1;
}
/**
 * write_csr - write CSR at the indicated offset
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 * @value - value to write
 */
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT) {
		void __iomem *base = hfi1_addr_from_offset(dd, offset);

		/* avoid write to RcvArray */
		if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
			return;
		writeq(value, base);
	}
}
/**
 * get_csr_addr - return the iomem address for offset
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 *
 * Return: The iomem address to use in subsequent
 * writeq/readq operations.
 */
void __iomem *get_csr_addr(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return hfi1_addr_from_offset(dd, offset);
	return NULL;
}
static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}
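/*
 * Example (editor's note): the counter accessors below funnel through
 * read_write_csr(); a read is read_write_csr(dd, csr, CNTR_MODE_R, 0)
 * and a counter reset is read_write_csr(dd, csr, CNTR_MODE_W, 0).
 */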
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_SDMA) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 0x100 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	return read_write_csr(dd, csr, mode, data);
}
static u64 access_sde_err_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;
}

static u64 access_sde_int_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].sdma_int_cnt;
	return 0;
}

static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
				   void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].idle_int_cnt;
	return 0;
}

static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
				       void *context, int idx, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].progress_int_cnt;
	return 0;
}
static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
	int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 val = 0;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}

	val = read_write_csr(dd, csr, mode, data);
	return val;
}
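
/*
 * Stride sketch (illustrative): per-engine 32-bit SDMA counter copies are
 * spaced 0x100 apart (csr + 0x100 * vl above in dev_access_u32_csr), while
 * per-VL 64-bit counters are packed as consecutive 8-byte CSRs
 * (csr + 8 * vl here); e.g. vl == 2 selects entry->csr + 0x200 in the
 * former case and entry->csr + 16 in the latter.
 */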
static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
	int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u32 csr = entry->csr;
	int ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;
	if (mode == CNTR_MODE_R)
		ret = read_lcb_csr(dd, csr, &data);
	else if (mode == CNTR_MODE_W)
		ret = write_lcb_csr(dd, csr, data);

	if (ret) {
		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
	return data;
}
static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
	int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_csr(ppd->dd, entry->csr, mode, data);
}
static u64 port_access_u64_csr(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;
	u64 val;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}

	val = read_write_csr(ppd->dd, csr, mode, data);
	return val;
}
/* Software defined */
static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
	u64 data)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = *cntr;
	} else if (mode == CNTR_MODE_W) {
		*cntr = data;
		ret = data;
	} else {
		dd_dev_err(dd, "Invalid cntr sw access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);

	return ret;
}
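
/*
 * Sketch (illustrative, not part of the driver): software counters follow
 * the same mode convention as the CSR path; a write stores "data"
 * directly, so writing 0 resets the counter.
 */
static inline void example_reset_sw_cntr(struct hfi1_devdata *dd, u64 *cntr)
{
	read_write_sw(dd, cntr, CNTR_MODE_W, 0);
}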
static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
	int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
}

static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
	int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
}
static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
}
static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
	u64 zero = 0;
	u64 *counter;

	if (vl == CNTR_INVALID_VL)
		counter = &ppd->port_xmit_discards;
	else if (vl >= 0 && vl < C_VL_COUNT)
		counter = &ppd->port_xmit_discards_vl[vl];
	else
		counter = &zero;

	return read_write_sw(ppd->dd, counter, mode, data);
}
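
/*
 * Selection sketch (illustrative): CNTR_INVALID_VL selects the port-wide
 * discard counter, 0..C_VL_COUNT-1 the per-VL counter, and any other vl a
 * throwaway zero, e.g.:
 *
 *	total = access_sw_xmit_discards(entry, ppd, CNTR_INVALID_VL,
 *					CNTR_MODE_R, 0);
 *	vl0   = access_sw_xmit_discards(entry, ppd, 0, CNTR_MODE_R, 0);
 */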
static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
			     mode, data);
}

static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
			     mode, data);
}
u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}
static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
	u64 __percpu *cntr,
	int vl, int mode, u64 data)
{
	u64 ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;

	if (mode == CNTR_MODE_R) {
		ret = get_all_cpu_total(cntr) - *z_val;
	} else if (mode == CNTR_MODE_W) {
		/* A write can only zero the counter */
		if (data == 0)
			*z_val = get_all_cpu_total(cntr);
		else
			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
	} else {
		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
		return 0;
	}

	return ret;
}
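
/*
 * Per-CPU sketch (illustrative, not part of the driver): a read reports
 * the sum over all possible CPUs minus the zero-point *z_val; the only
 * legal write is 0, which snapshots the current total into *z_val.
 */
static inline u64 example_read_then_zero_cpu(struct hfi1_devdata *dd,
	u64 *z_val, u64 __percpu *cntr)
{
	u64 before = read_write_cpu(dd, z_val, cntr, CNTR_INVALID_VL,
				    CNTR_MODE_R, 0);

	read_write_cpu(dd, z_val, cntr, CNTR_INVALID_VL, CNTR_MODE_W, 0);
	return before;
}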
static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
			      mode, data);
}

static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
			      mode, data);
}
static u64 access_sw_pio_wait(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_piowait;
}

static u64 access_sw_pio_drain(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->verbs_dev.n_piodrain;
}

static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_txwait;
}

static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_kmem_wait;
}

static u64 access_sw_send_schedule(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
			      mode, data);
}
/* Software counters for the error status bits within MISC_ERR_STATUS */
static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[12];
}

static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[11];
}

static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[10];
}

static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[9];
}

static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[8];
}

static u64 access_misc_efuse_read_bad_addr_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[7];
}

static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[6];
}

static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[5];
}

static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[4];
}

static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[3];
}

static u64 access_misc_csr_write_bad_addr_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[2];
}

static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[1];
}

static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[0];
}
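
/*
 * Pattern note (illustrative): each accessor in these blocks maps one
 * error-status bit to its software counter, with bit N of the status
 * register corresponding to array index N; e.g. bit 12 of
 * MISC_ERR_STATUS (PLL lock fail) is dd->misc_err_status_cnt[12]. The
 * CceErrStatus, RcvErrStatus, SendPioErrStatus, SendDmaErrStatus,
 * SendEgressErrStatus, SendErrStatus and SendCtxtErrStatus accessors
 * that follow all repeat this same pattern.
 */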
/*
 * Software counter for the aggregate of
 * individual CceErrStatus counters
 */
static u64 access_sw_cce_err_status_aggregated_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_cce_err_status_aggregate;
}
/*
 * Software counters corresponding to each of the
 * error status bits within CceErrStatus
 */
static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[40];
}

static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[39];
}

static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[38];
}

static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[37];
}

static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[36];
}

static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[35];
}

static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[34];
}

static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[33];
}

static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[32];
}

static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[31];
}

static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[30];
}

static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[29];
}

static u64 access_pcic_transmit_back_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[28];
}

static u64 access_pcic_transmit_front_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[27];
}

static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[26];
}

static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[25];
}

static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[24];
}

static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[23];
}

static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[22];
}

static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[21];
}

static u64 access_pcic_n_post_dat_q_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[20];
}

static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[19];
}

static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[18];
}

static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[17];
}

static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[16];
}

static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[15];
}

static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[14];
}

static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[13];
}

static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[12];
}

static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[11];
}

static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[10];
}

static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[9];
}

static u64 access_cce_cli2_async_fifo_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[8];
}

static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[7];
}

static u64 access_cce_cli0_async_fifo_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[6];
}

static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[5];
}

static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[4];
}

static u64 access_cce_trgt_async_fifo_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[3];
}

static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[2];
}

static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[1];
}

static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within RcvErrStatus
 */
static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[63];
}

static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[62];
}

static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[61];
}

static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[60];
}

static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[59];
}

static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[58];
}

static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[57];
}

static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[56];
}

static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[55];
}

static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[54];
}

static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[53];
}

static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[52];
}

static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[51];
}

static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[50];
}

static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[49];
}

static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[48];
}

static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[47];
}

static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[46];
}

static u64 access_rx_hq_intr_csr_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[45];
}

static u64 access_rx_lookup_csr_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[44];
}

static u64 access_rx_lookup_rcv_array_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[43];
}

static u64 access_rx_lookup_rcv_array_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[42];
}

static u64 access_rx_lookup_des_part2_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[41];
}

static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[40];
}

static u64 access_rx_lookup_des_part1_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[39];
}

static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[38];
}

static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[37];
}

static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[36];
}

static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[35];
}

static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[34];
}

static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[33];
}

static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[32];
}

static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[31];
}

static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[30];
}

static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[29];
}

static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[28];
}

static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[27];
}

static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[26];
}

static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[25];
}

static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[24];
}

static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[23];
}

static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[22];
}

static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[21];
}

static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[20];
}

static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[19];
}

static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[18];
}

static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[17];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[16];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[15];
}

static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[14];
}

static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[13];
}

static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[12];
}

static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[11];
}

static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[10];
}

static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[9];
}

static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[8];
}

static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[7];
}

static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[6];
}

static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[5];
}

static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[4];
}

static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[3];
}

static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[2];
}

static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[1];
}

static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendPioErrStatus
 */
static u64 access_pio_pec_sop_head_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[35];
}

static u64 access_pio_pcc_sop_head_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[34];
}

static u64 access_pio_last_returned_cnt_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[33];
}

static u64 access_pio_current_free_cnt_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[32];
}

static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[31];
}

static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[30];
}

static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[29];
}

static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[28];
}

static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[27];
}

static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[26];
}

static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[25];
}

static u64 access_pio_block_qw_count_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[24];
}

static u64 access_pio_write_qw_valid_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[23];
}

static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[22];
}

static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[21];
}

static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[20];
}

static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[19];
}

static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[18];
}

static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[17];
}

static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[16];
}

static u64 access_pio_credit_ret_fifo_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[15];
}

static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[14];
}

static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[13];
}

static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[12];
}

static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[11];
}

static u64 access_pio_sm_pkt_reset_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[10];
}

static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[9];
}

static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[8];
}

static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[7];
}

static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[6];
}

static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[5];
}

static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[4];
}

static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[3];
}

static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[2];
}

static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[1];
}

static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaErrStatus
 */
static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[3];
}

static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[2];
}

static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[1];
}

static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendEgressErrStatus
 */
static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[63];
}

static u64 access_tx_read_sdma_memory_csr_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[62];
}

static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[61];
}

static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[60];
}

static u64 access_tx_read_sdma_memory_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[59];
}

static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[58];
}

static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[57];
}

static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[56];
}

static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[55];
}

static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[54];
}

static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[53];
}

static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[52];
}

static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[51];
}

static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[50];
}

static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[49];
}

static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[48];
}

static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[47];
}

static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[46];
}

static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[45];
}

static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[44];
}

static u64 access_tx_read_sdma_memory_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[43];
}

static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[42];
}

static u64 access_tx_credit_return_partiy_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[41];
}

static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[40];
}

static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[39];
}

static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[38];
}

static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[37];
}

static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[36];
}

static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[35];
}

static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[34];
}

static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[33];
}

static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[32];
}

static u64 access_tx_sdma15_disallowed_packet_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[31];
}

static u64 access_tx_sdma14_disallowed_packet_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[30];
}

static u64 access_tx_sdma13_disallowed_packet_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[29];
}

static u64 access_tx_sdma12_disallowed_packet_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[28];
}

static u64 access_tx_sdma11_disallowed_packet_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[27];
}

static u64 access_tx_sdma10_disallowed_packet_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[26];
}

static u64 access_tx_sdma9_disallowed_packet_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[25];
}

static u64 access_tx_sdma8_disallowed_packet_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[24];
}

static u64 access_tx_sdma7_disallowed_packet_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[23];
}

static u64 access_tx_sdma6_disallowed_packet_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[22];
}

static u64 access_tx_sdma5_disallowed_packet_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[21];
}

static u64 access_tx_sdma4_disallowed_packet_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[20];
}

static u64 access_tx_sdma3_disallowed_packet_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[19];
}

static u64 access_tx_sdma2_disallowed_packet_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[18];
}

static u64 access_tx_sdma1_disallowed_packet_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[17];
}

static u64 access_tx_sdma0_disallowed_packet_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[16];
}

static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[15];
}

static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[14];
}

static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[13];
}

static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[12];
}

static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[11];
}

static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[10];
}

static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[9];
}

static u64 access_tx_sdma_launch_intf_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[8];
}

static u64 access_tx_pio_launch_intf_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[7];
}

static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[6];
}

static u64 access_tx_incorrect_link_state_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[5];
}

static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[4];
}

static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[3];
}

static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[2];
}

static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[1];
}

static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendErrStatus
 */
static u64 access_send_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[2];
}

static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[1];
}

static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendCtxtErrStatus
 */
static u64 access_pio_write_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[4];
}

static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[3];
}

static u64 access_pio_write_crosses_boundary_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[2];
}

static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[1];
}

static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaEngErrStatus
 */
static u64 access_sdma_header_request_fifo_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[23];
}

static u64 access_sdma_header_storage_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[22];
}

static u64 access_sdma_packet_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[21];
}

static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[20];
}

static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[19];
}

static u64 access_sdma_header_request_fifo_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[18];
}

static u64 access_sdma_header_storage_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[17];
}

static u64 access_sdma_packet_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[16];
}

static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[15];
}

static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[14];
}

static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[13];
}

static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[12];
}

static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[11];
}

static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[10];
}

static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[9];
}

static u64 access_sdma_packet_desc_overflow_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[8];
}

static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl,
					       int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[7];
}

static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
				    void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[6];
}

static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[5];
}

static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[4];
}

static u64 access_sdma_tail_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[3];
}

static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[2];
}

static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[1];
}

static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[0];
}

static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
				 void *context, int vl, int mode,
				 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	u64 val = 0;
	u64 csr = entry->csr;

	/*
	 * Fold the software-counted bypass packet errors into the CSR value,
	 * saturating at CNTR_MAX on read; a write clears the software count.
	 */
	val = read_write_csr(dd, csr, mode, data);
	if (mode == CNTR_MODE_R) {
		val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
			CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
	} else if (mode == CNTR_MODE_W) {
		dd->sw_rcv_bypass_packet_errors = 0;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}
	return val;
}

#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,	      \
				void *context, int vl, int mode, u64 data)    \
{									      \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,	      \
			      ppd->ibport_data.rvp.cntr, vl,		      \
			      mode, data);				      \
}
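
/*
 * Illustrative expansion (not part of the driver): with the macro above,
 * def_access_sw_cpu(rc_acks) defines a function equivalent to
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */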
def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);

#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry,		      \
			     void *context, int vl, int mode, u64 data)	      \
{									      \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
									      \
	if (vl != CNTR_INVALID_VL)					      \
		return 0;						      \
									      \
	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,	      \
			     mode, data);				      \
}
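
/*
 * Illustrative expansion (not part of the driver):
 * def_access_ibp_counter(rc_resends) defines access_ibp_rc_resends(), which
 * rejects per-VL queries (returning 0 whenever vl != CNTR_INVALID_VL) and
 * otherwise reads or writes the software counter
 * ppd->ibport_data.rvp.n_rc_resends through read_write_sw().
 */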
def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);

static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
			RCV_TID_FLOW_GEN_MISMATCH_CNT,
			CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
			CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
			CNTR_NORMAL),
[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
			CNTR_NORMAL),
[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
			CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
			   access_dc_rcv_err_cnt),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
			CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
			CNTR_SYNTH),
[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
			CNTR_SYNTH),
[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
			DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
			DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
			DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
			CNTR_SYNTH),
[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_TOTAL_CRC] =
	DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
			 CNTR_SYNTH),
[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
				  CNTR_SYNTH),
[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
				  CNTR_SYNTH),
[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
				  CNTR_SYNTH),
[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
				  CNTR_SYNTH),
[C_DC_CRC_MULT_LN] =
	DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
			 CNTR_SYNTH),
[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
				    CNTR_SYNTH),
[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
				    CNTR_SYNTH),
[C_DC_SEQ_CRC_CNT] =
	DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_ONLY_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS1_CNT] =
	DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS2_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
			 CNTR_SYNTH),
[C_DC_REINIT_FROM_PEER_CNT] =
	DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
			 CNTR_SYNTH),
[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
				  CNTR_SYNTH),
[C_DC_MISC_FLG_CNT] =
	DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_GOOD_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
[C_DC_PRF_ACCEPTED_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_RX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_TX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_CLK_CNTR] =
	DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
	DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
	DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
			 CNTR_SYNTH),
[C_DC_PG_STS_TX_SBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
[C_DC_PG_STS_TX_MBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
			 CNTR_SYNTH),
[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_intr),
[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_rcv_limit),
[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
			    access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
			    access_sw_pio_wait),
[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
			     access_sw_pio_drain),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
			     access_sw_kmem_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
			      access_sw_send_schedule),
[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
				      SEND_DMA_DESC_FETCHED_CNT, 0,
				      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
				      dev_access_u32_csr),
[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
			     CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			     access_sde_int_cnt),
[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
			     CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			     access_sde_err_cnt),
[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
				  CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
				  access_sde_idle_int_cnt),
[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
				      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
				      access_sde_progress_int_cnt),
/* MISC_ERR_STATUS */
[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_pll_lock_fail_err_cnt),
[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_mbist_fail_err_cnt),
[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_invalid_eep_cmd_err_cnt),
[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_done_parity_err_cnt),
[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_write_err_cnt),
[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
				0, CNTR_NORMAL,
				access_misc_efuse_read_bad_addr_err_cnt),
[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_csr_parity_err_cnt),
[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_fw_auth_failed_err_cnt),
[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_key_mismatch_err_cnt),
[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_sbus_write_failed_err_cnt),
[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_write_bad_addr_err_cnt),
[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_read_bad_addr_err_cnt),
[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_parity_err_cnt),
/* CceErrStatus */
[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
				CNTR_NORMAL,
				access_sw_cce_err_status_aggregated_cnt),
[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_csr_parity_err_cnt),
[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
				CNTR_NORMAL,
				access_cce_int_map_unc_err_cnt),
[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
				CNTR_NORMAL,
				access_cce_int_map_cor_err_cnt),
[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_table_unc_err_cnt),
[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_table_cor_err_cnt),
[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_rxdma_conv_fifo_parity_err_cnt),
[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_rcpl_async_fifo_parity_err_cnt),
[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_seg_write_bad_addr_err_cnt),
[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_seg_read_bad_addr_err_cnt),
[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
				CNTR_NORMAL,
				access_la_triggered_cnt),
[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
				CNTR_NORMAL,
				access_cce_trgt_cpl_timeout_err_cnt),
[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_receive_parity_err_cnt),
[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_transmit_back_parity_err_cnt),
[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
				0, CNTR_NORMAL,
				access_pcic_transmit_front_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_dat_q_unc_err_cnt),
[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_hd_q_unc_err_cnt),
[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_dat_q_unc_err_cnt),
[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_hd_q_unc_err_cnt),
[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_sot_mem_unc_err_cnt),
[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_mem_unc_err),
[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_n_post_dat_q_parity_err_cnt),
[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_n_post_h_q_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_dat_q_cor_err_cnt),
[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_hd_q_cor_err_cnt),
[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_dat_q_cor_err_cnt),
[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_hd_q_cor_err_cnt),
[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_sot_mem_cor_err_cnt),
[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_mem_cor_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoDbgParityError", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_dbg_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoRxdmaParityError", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_rxdma_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_cli2_async_fifo_parity_err_cnt),
[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_cfg_bus_parity_err_cnt),
[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_cli0_async_fifo_parity_err_cnt),
[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_rspd_data_parity_err_cnt),
[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
				CNTR_NORMAL,
				access_cce_trgt_access_err_cnt),
[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_trgt_async_fifo_parity_err_cnt),
[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_write_bad_addr_err_cnt),
[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_read_bad_addr_err_cnt),
[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_ccs_csr_parity_err_cnt),
/* RcvErrStatus */
[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_parity_err_cnt),
[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_write_bad_addr_err_cnt),
[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_read_bad_addr_err_cnt),
[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_unc_err_cnt),
[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_dq_fsm_encoding_err_cnt),
[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_eq_fsm_encoding_err_cnt),
[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_parity_err_cnt),
[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_data_cor_err_cnt),
[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_data_unc_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_data_fifo_rd_cor_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_data_fifo_rd_unc_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_hdr_fifo_rd_cor_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_hdr_fifo_rd_unc_err_cnt),
[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part2_cor_err_cnt),
[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part2_unc_err_cnt),
[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part1_cor_err_cnt),
[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part1_unc_err_cnt),
[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
				CNTR_NORMAL,
				access_rx_hq_intr_fsm_err_cnt),
[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_hq_intr_csr_parity_err_cnt),
[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_csr_parity_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_rcv_array_cor_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_rcv_array_unc_err_cnt),
[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_lookup_des_part2_parity_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
				0, CNTR_NORMAL,
				access_rx_lookup_des_part1_unc_cor_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_des_part1_unc_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_next_free_buf_cor_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_next_free_buf_unc_err_cnt),
[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
				"RxRbufFlInitWrAddrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_fl_init_wr_addr_parity_err_cnt),
[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_fl_initdone_parity_err_cnt),
[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_fl_write_addr_parity_err_cnt),
[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_fl_rd_addr_parity_err_cnt),
[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_empty_err_cnt),
[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_full_err_cnt),
[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_bad_lookup_err_cnt),
[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_ctx_id_parity_err_cnt),
[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_csr_qeopdw_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
				"RxRbufCsrQNumOfPktParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
				"RxRbufCsrQTlPtrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
				0, 0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
				"RxRbufCsrQHeadBufNumParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_block_list_read_cor_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_block_list_read_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
				"RxRbufLookupDesRegUncCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_reg_unc_err_cnt),
[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_free_list_cor_err_cnt),
[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_free_list_unc_err_cnt),
[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_fsm_encoding_err_cnt),
[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_flag_cor_err_cnt),
[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_flag_unc_err_cnt),
[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dc_sop_eop_parity_err_cnt),
[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_csr_parity_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_qp_map_table_cor_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_qp_map_table_unc_err_cnt),
[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_data_cor_err_cnt),
[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_data_unc_err_cnt),
[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_hdr_cor_err_cnt),
[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_hdr_unc_err_cnt),
[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dc_intf_parity_err_cnt),
[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_cor_err_cnt),
/* SendPioErrStatus */
[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pec_sop_head_parity_err_cnt),
[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pcc_sop_head_parity_err_cnt),
[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
				0, 0, CNTR_NORMAL,
				access_pio_last_returned_cnt_parity_err_cnt),
[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
				0, CNTR_NORMAL,
				access_pio_current_free_cnt_parity_err_cnt),
[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
				CNTR_NORMAL,
				access_pio_reserved_31_err_cnt),
[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
				CNTR_NORMAL,
				access_pio_reserved_30_err_cnt),
[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
				CNTR_NORMAL,
				access_pio_ppmc_sop_len_err_cnt),
[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_ppmc_bqc_mem_parity_err_cnt),
[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_vl_fifo_parity_err_cnt),
[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_vlf_sop_parity_err_cnt),
[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_vlf_v1_len_parity_err_cnt),
[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_block_qw_count_parity_err_cnt),
[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_qw_valid_parity_err_cnt),
[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
				CNTR_NORMAL,
				access_pio_state_machine_err_cnt),
[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_data_parity_err_cnt),
[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pio_host_addr_mem_cor_err_cnt),
[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pio_host_addr_mem_unc_err_cnt),
[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
				CNTR_NORMAL,
				access_pio_init_sm_in_err_cnt),
[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
				CNTR_NORMAL,
				access_pio_ppmc_pbl_fifo_err_cnt),
[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_pio_credit_ret_fifo_parity_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank1_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank0_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank1_unc_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank0_unc_err_cnt),
[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_sm_pkt_reset_parity_err_cnt),
[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pkt_evict_fifo_parity_err_cnt),
[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
				"PioSbrdctrlCrrelFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_sbrdctl_crrel_parity_err_cnt),
[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pec_fifo_parity_err_cnt),
[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pcc_fifo_parity_err_cnt),
[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
				CNTR_NORMAL,
				access_pio_sb_mem_fifo1_err_cnt),
[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
				CNTR_NORMAL,
				access_pio_sb_mem_fifo0_err_cnt),
[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_csr_parity_err_cnt),
[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_addr_parity_err_cnt),
[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_bad_ctxt_err_cnt),
/* SendDmaErrStatus */
[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
				0, CNTR_NORMAL,
				access_sdma_pcie_req_tracking_cor_err_cnt),
[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
				0, CNTR_NORMAL,
				access_sdma_pcie_req_tracking_unc_err_cnt),
[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_csr_parity_err_cnt),
[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_rpy_tag_err_cnt),
/* SendEgressErrStatus */
[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_pio_memory_csr_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
				0, CNTR_NORMAL,
				access_tx_read_sdma_memory_csr_err_cnt),
[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_egress_fifo_cor_err_cnt),
[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_pio_memory_cor_err_cnt),
[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_sdma_memory_cor_err_cnt),
[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sb_hdr_cor_err_cnt),
[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
				CNTR_NORMAL,
				access_tx_credit_overrun_err_cnt),
[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo8_cor_err_cnt),
[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo7_cor_err_cnt),
[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo6_cor_err_cnt),
[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo5_cor_err_cnt),
[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo4_cor_err_cnt),
[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo3_cor_err_cnt),
[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo2_cor_err_cnt),
[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo1_cor_err_cnt),
[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo0_cor_err_cnt),
[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
				CNTR_NORMAL,
				access_tx_credit_return_vl_err_cnt),
[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
				CNTR_NORMAL,
				access_tx_hcrc_insertion_err_cnt),
[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_egress_fifo_unc_err_cnt),
[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_pio_memory_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_sdma_memory_unc_err_cnt),
[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sb_hdr_unc_err_cnt),
[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_credit_return_partiy_err_cnt),
[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo8_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo7_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo6_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo5_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo4_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo3_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo2_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo1_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo0_unc_or_parity_err_cnt),
[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma15_disallowed_packet_err_cnt),
[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma14_disallowed_packet_err_cnt),
[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma13_disallowed_packet_err_cnt),
[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma12_disallowed_packet_err_cnt),
[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma11_disallowed_packet_err_cnt),
[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma10_disallowed_packet_err_cnt),
[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma9_disallowed_packet_err_cnt),
[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma8_disallowed_packet_err_cnt),
[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma7_disallowed_packet_err_cnt),
[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma6_disallowed_packet_err_cnt),
[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma5_disallowed_packet_err_cnt),
[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma4_disallowed_packet_err_cnt),
[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma3_disallowed_packet_err_cnt),
[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma2_disallowed_packet_err_cnt),
[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma1_disallowed_packet_err_cnt),
[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma0_disallowed_packet_err_cnt),
[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_config_parity_err_cnt),
[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sbrd_ctl_csr_parity_err_cnt),
[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_csr_parity_err_cnt),
[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
				CNTR_NORMAL,
				access_tx_illegal_vl_err_cnt),
[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
				"TxSbrdCtlStateMachineParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sbrd_ctl_state_machine_parity_err_cnt),
[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_10_err_cnt),
[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_9_err_cnt),
[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma_launch_intf_parity_err_cnt),
[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_pio_launch_intf_parity_err_cnt),
[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_6_err_cnt),
[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
				CNTR_NORMAL,
				access_tx_incorrect_link_state_err_cnt),
[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
				CNTR_NORMAL,
				access_tx_linkdown_err_cnt),
[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
				"EgressFifoUnderrunOrParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_egress_fifi_underrun_or_parity_err_cnt),
[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_2_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_pkt_integrity_mem_unc_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_pkt_integrity_mem_cor_err_cnt),
/* SendErrStatus */
[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_send_csr_write_bad_addr_err_cnt),
[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_send_csr_read_bad_addr_err_cnt),
[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_send_csr_parity_cnt),
/* SendCtxtErrStatus */
[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_out_of_bounds_err_cnt),
[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_overflow_err_cnt),
[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
				0, 0, CNTR_NORMAL,
				access_pio_write_crosses_boundary_err_cnt),
[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
				CNTR_NORMAL,
				access_pio_disallowed_packet_err_cnt),
[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
				CNTR_NORMAL,
				access_pio_inconsistent_sop_err_cnt),
/* SendDmaEngErrStatus */
[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
				0, 0, CNTR_NORMAL,
				access_sdma_header_request_fifo_cor_err_cnt),
[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_storage_cor_err_cnt),
[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_packet_tracking_cor_err_cnt),
[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_assembly_cor_err_cnt),
[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_desc_table_cor_err_cnt),
[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
				0, 0, CNTR_NORMAL,
				access_sdma_header_request_fifo_unc_err_cnt),
[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_storage_unc_err_cnt),
[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_packet_tracking_unc_err_cnt),
[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_assembly_unc_err_cnt),
[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_desc_table_unc_err_cnt),
[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_timeout_err_cnt),
[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_length_err_cnt),
[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_address_err_cnt),
[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_select_err_cnt),
[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
				CNTR_NORMAL,
				access_sdma_reserved_9_err_cnt),
[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_packet_desc_overflow_err_cnt),
[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_length_mismatch_err_cnt),
[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_halt_err_cnt),
[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_mem_read_err_cnt),
[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_first_desc_err_cnt),
[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_tail_out_of_bounds_err_cnt),
[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_too_long_err_cnt),
[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_gen_mismatch_err_cnt),
[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_wrong_dw_err_cnt),
};

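/*
 * Note (editorial): dev_cntrs[] above and port_cntrs[] below use C99
 * designated initializers, so each entry lands in the slot named by its C_*
 * enum value and unlisted slots are zero-initialized. The generic counter
 * read/write code dispatches through the access function supplied as the
 * last CNTR_ELEM argument, which is why each software error-status bit above
 * carries its own access_*_cnt() helper.
 */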
static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
				       CNTR_NORMAL),
[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
					CNTR_NORMAL),
[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
					 CNTR_NORMAL),
[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
				       CNTR_NORMAL),
[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
					 CNTR_NORMAL),
[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
				      CNTR_NORMAL),
[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
				      CNTR_NORMAL),
[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
				      CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
				     CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
				      CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			     access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			   access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
				 access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			     access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
				CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
				access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
				 access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
				access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
			       access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
				access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
				       access_sw_cpu_rc_delayed_comp),
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
};

5204 /* ======================================================================== */
5206 /* return true if this is chip revision A */
5207 int is_ax(struct hfi1_devdata *dd)
5210 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5211 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5212 return (chip_rev_minor & 0xf0) == 0;
5215 /* return true if this is chip revision B */
5216 int is_bx(struct hfi1_devdata *dd)
5219 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5220 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5221 return (chip_rev_minor & 0xF0) == 0x10;
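/*
 * Illustrative sketch (editor addition, not driver code): the checks above
 * decode the minor chip revision from the CCE_REVISION CSR and test its
 * high nibble (0x0n = A-step, 0x1n = B-step). The shift/mask values below
 * are assumed placeholders for the CCE_REVISION_CHIP_REV_MINOR_* constants
 * defined elsewhere in this driver.
 */
#include <stdint.h>

#define REV_MINOR_SHIFT 8	/* placeholder, not the real constant */
#define REV_MINOR_MASK 0xffull	/* placeholder, not the real constant */

static int minor_rev_is_a_step(uint64_t revision)
{
	uint64_t minor = (revision >> REV_MINOR_SHIFT) & REV_MINOR_MASK;

	return (minor & 0xf0) == 0;	/* A-step: high nibble is 0x0 */
}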
5225 * Append string s to buffer buf. Arguments curp and len are the current
5226 * position and remaining length, respectively.
5228 * Return 0 on success, 1 if out of room.
5230 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5234 int result = 0; /* success */
5237 /* add a comma if this is not the first string in the buffer */
5240 result = 1; /* out of room */
5247 /* copy the string */
5248 while ((c = *s++) != 0) {
5250 result = 1; /* out of room */
5258 /* write return values */
5266 * Using the given flag table, print a comma-separated string into
5267 * the buffer. End in '*' if the buffer is too short.
5269 static char *flag_string(char *buf, int buf_len, u64 flags,
5270 struct flag_table *table, int table_size)
5278 /* make sure there are at least 2 bytes so we can form "*" */
5282 len--; /* leave room for a nul */
5283 for (i = 0; i < table_size; i++) {
5284 if (flags & table[i].flag) {
5285 no_room = append_str(buf, &p, &len, table[i].str);
5288 flags &= ~table[i].flag;
5292 /* any undocumented bits left? */
5293 if (!no_room && flags) {
5294 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5295 no_room = append_str(buf, &p, &len, extra);
5298 /* add '*' if we ran out of room */
5300 /* may need to back up to add space for a '*' */
5306 /* add final nul - space already allocated above */
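/*
 * Standalone sketch (editor addition): flag_string()/append_str() above
 * turn a bit mask into "NameA,NameB,bits 0x..". A minimal userspace
 * version of the same algorithm, leaning on snprintf() truncation instead
 * of the manual bookkeeping, might look like this; struct flag_entry
 * mirrors the driver's { flag, str } table layout and is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct flag_entry { uint64_t flag; const char *str; };

static void flags_to_string(char *buf, size_t len, uint64_t flags,
			    const struct flag_entry *tbl, size_t n)
{
	size_t pos = 0, i;

	buf[0] = '\0';
	for (i = 0; i < n && pos < len; i++) {
		if (!(flags & tbl[i].flag))
			continue;
		pos += snprintf(buf + pos, len - pos, "%s%s",
				pos ? "," : "", tbl[i].str);
		flags &= ~tbl[i].flag;
	}
	if (flags && pos < len)	/* any undocumented bits left? */
		snprintf(buf + pos, len - pos, "%sbits 0x%llx",
			 pos ? "," : "", (unsigned long long)flags);
}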
5311 /* first 8 CCE error interrupt source names */
5312 static const char * const cce_misc_names[] = {
5313 "CceErrInt", /* 0 */
5314 "RxeErrInt", /* 1 */
5315 "MiscErrInt", /* 2 */
5316 "Reserved3", /* 3 */
5317 "PioErrInt", /* 4 */
5318 "SDmaErrInt", /* 5 */
5319 "EgressErrInt", /* 6 */
5320 "TxeErrInt" /* 7 */
5324 * Return the miscellaneous error interrupt name.
5326 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5328 if (source < ARRAY_SIZE(cce_misc_names))
5329 strncpy(buf, cce_misc_names[source], bsize);
5331 snprintf(buf, bsize, "Reserved%u",
5332 source + IS_GENERAL_ERR_START);
5338 * Return the SDMA engine error interrupt name.
5340 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5342 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5347 * Return the send context error interrupt name.
5349 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5351 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5355 static const char * const various_names[] = {
5364 * Return the various interrupt name.
5366 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5368 if (source < ARRAY_SIZE(various_names))
5369 strncpy(buf, various_names[source], bsize);
5371 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5376 * Return the DC interrupt name.
5378 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5380 static const char * const dc_int_names[] = {
5384 "lbm" /* local block merge */
5387 if (source < ARRAY_SIZE(dc_int_names))
5388 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5390 snprintf(buf, bsize, "DCInt%u", source);
5394 static const char * const sdma_int_names[] = {
5401 * Return the SDMA engine interrupt name.
5403 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5405 /* what interrupt */
5406 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5408 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5410 if (likely(what < 3))
5411 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5413 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
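/*
 * Worked example (editor addition): the flat SDMA interrupt source space
 * is three consecutive banks of TXE_NUM_SDMA_ENGINES sources, so with 16
 * engines, source 35 decodes as what = 2 (progress) and which = 3
 * (engine 3). A self-contained version of the decode:
 */
#define NUM_ENGINES 16	/* mirrors TXE_NUM_SDMA_ENGINES */

static void decode_sdma_source(unsigned int source,
			       unsigned int *what, unsigned int *which)
{
	*what = source / NUM_ENGINES;	/* 0=SDmaInt 1=IdleInt 2=ProgressInt */
	*which = source % NUM_ENGINES;	/* engine number within the bank */
}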
5418 * Return the receive available interrupt name.
5420 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5422 snprintf(buf, bsize, "RcvAvailInt%u", source);
5427 * Return the receive urgent interrupt name.
5429 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5431 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5436 * Return the send credit interrupt name.
5438 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5440 snprintf(buf, bsize, "SendCreditInt%u", source);
5445 * Return the reserved interrupt name.
5447 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5449 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5453 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5455 return flag_string(buf, buf_len, flags,
5456 cce_err_status_flags,
5457 ARRAY_SIZE(cce_err_status_flags));
5460 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5462 return flag_string(buf, buf_len, flags,
5463 rxe_err_status_flags,
5464 ARRAY_SIZE(rxe_err_status_flags));
5467 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5469 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5470 ARRAY_SIZE(misc_err_status_flags));
5473 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5475 return flag_string(buf, buf_len, flags,
5476 pio_err_status_flags,
5477 ARRAY_SIZE(pio_err_status_flags));
5480 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5482 return flag_string(buf, buf_len, flags,
5483 sdma_err_status_flags,
5484 ARRAY_SIZE(sdma_err_status_flags));
5487 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5489 return flag_string(buf, buf_len, flags,
5490 egress_err_status_flags,
5491 ARRAY_SIZE(egress_err_status_flags));
5494 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5496 return flag_string(buf, buf_len, flags,
5497 egress_err_info_flags,
5498 ARRAY_SIZE(egress_err_info_flags));
5501 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5503 return flag_string(buf, buf_len, flags,
5504 send_err_status_flags,
5505 ARRAY_SIZE(send_err_status_flags));
5508 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5514 * For most of these errors, there is nothing that can be done except
5515 * report or record it.
5517 dd_dev_info(dd, "CCE Error: %s\n",
5518 cce_err_status_string(buf, sizeof(buf), reg));
5520 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5521 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5522 /* this error requires a manual drop into SPC freeze mode */
5524 start_freeze_handling(dd->pport, FREEZE_SELF);
5527 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5528 if (reg & (1ull << i)) {
5529 incr_cntr64(&dd->cce_err_status_cnt[i]);
5530 /* maintain a counter over all cce_err_status errors */
5531 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5537 * Check counters for receive errors that do not have an interrupt
5538 * associated with them.
5540 #define RCVERR_CHECK_TIME 10
5541 static void update_rcverr_timer(unsigned long opaque)
5543 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5544 struct hfi1_pportdata *ppd = dd->pport;
5545 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5547 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5548 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5549 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5550 set_link_down_reason(
5551 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5552 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5553 queue_work(ppd->link_wq, &ppd->link_bounce_work);
5555 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5557 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5560 static int init_rcverr(struct hfi1_devdata *dd)
5562 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5563 /* Assume the hardware counter has been reset */
5564 dd->rcv_ovfl_cnt = 0;
5565 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5568 static void free_rcverr(struct hfi1_devdata *dd)
5570 if (dd->rcverr_timer.data)
5571 del_timer_sync(&dd->rcverr_timer);
5572 dd->rcverr_timer.data = 0;
5575 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5580 dd_dev_info(dd, "Receive Error: %s\n",
5581 rxe_err_status_string(buf, sizeof(buf), reg));
5583 if (reg & ALL_RXE_FREEZE_ERR) {
5587 * Freeze mode recovery is disabled for the errors
5588 * in RXE_FREEZE_ABORT_MASK
5590 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5591 flags = FREEZE_ABORT;
5593 start_freeze_handling(dd->pport, flags);
5596 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5597 if (reg & (1ull << i))
5598 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5602 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5607 dd_dev_info(dd, "Misc Error: %s",
5608 misc_err_status_string(buf, sizeof(buf), reg));
5609 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5610 if (reg & (1ull << i))
5611 incr_cntr64(&dd->misc_err_status_cnt[i]);
5615 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5620 dd_dev_info(dd, "PIO Error: %s\n",
5621 pio_err_status_string(buf, sizeof(buf), reg));
5623 if (reg & ALL_PIO_FREEZE_ERR)
5624 start_freeze_handling(dd->pport, 0);
5626 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5627 if (reg & (1ull << i))
5628 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5632 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5637 dd_dev_info(dd, "SDMA Error: %s\n",
5638 sdma_err_status_string(buf, sizeof(buf), reg));
5640 if (reg & ALL_SDMA_FREEZE_ERR)
5641 start_freeze_handling(dd->pport, 0);
5643 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5644 if (reg & (1ull << i))
5645 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5649 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5651 incr_cntr64(&ppd->port_xmit_discards);
5654 static void count_port_inactive(struct hfi1_devdata *dd)
5656 __count_port_discards(dd->pport);
5660 * We have had a "disallowed packet" error during egress. Determine the
5661 * integrity check that failed, and update the relevant error counter, etc.
5663 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5664 * bit of state per integrity check, and so we can miss the reason for an
5665 * egress error if more than one packet fails the same integrity check
5666 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5668 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5671 struct hfi1_pportdata *ppd = dd->pport;
5672 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5673 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5676 /* clear down all observed info as quickly as possible after read */
5677 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5680 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5681 info, egress_err_info_string(buf, sizeof(buf), info), src);
5683 /* Eventually add other counters for each bit */
5684 if (info & PORT_DISCARD_EGRESS_ERRS) {
5688 * Count all applicable bits as individual errors and
5689 * attribute them to the packet that triggered this handler.
5690 * This may not be completely accurate due to limitations
5691 * on the available hardware error information. There is
5692 * a single information register and any number of error
5693 * packets may have occurred and contributed to it before
5694 * this routine is called. This means that:
5695 * a) If multiple packets with the same error occur before
5696 * this routine is called, earlier packets are missed.
5697 * There is only a single bit for each error type.
5698 * b) Errors may not be attributed to the correct VL.
5699 * The driver is attributing all bits in the info register
5700 * to the packet that triggered this call, but bits
5701 * could be an accumulation of different packets with
5702 * different errors.
5703 * c) A single error packet may have multiple counts attached
5704 * to it. There is no way for the driver to know if
5705 * multiple bits set in the info register are due to a
5706 * single packet or multiple packets. The driver assumes
5707 * multiple packets.
5709 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5710 for (i = 0; i < weight; i++) {
5711 __count_port_discards(ppd);
5712 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5713 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5715 incr_cntr64(&ppd->port_xmit_discards_vl
5722 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5723 * register. Does it represent a 'port inactive' error?
5725 static inline int port_inactive_err(u64 posn)
5727 return (posn >= SEES(TX_LINKDOWN) &&
5728 posn <= SEES(TX_INCORRECT_LINK_STATE));
5732 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5733 * register. Does it represent a 'disallowed packet' error?
5735 static inline int disallowed_pkt_err(int posn)
5737 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5738 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5742 * Input value is a bit position of one of the SDMA engine disallowed
5743 * packet errors. Return which engine. Use of this must be guarded by
5744 * disallowed_pkt_err().
5746 static inline int disallowed_pkt_engine(int posn)
5748 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
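/*
 * Sketch (editor addition): SEND_EGRESS_ERR_STATUS dedicates one bit per
 * SDMA engine to "disallowed packet", starting at the SDMA0 position, so
 * mapping a bit position back to an engine is plain subtraction; the
 * discard accounting above then charges one error per set info bit, which
 * a population count expresses directly. The base bit value below is
 * illustrative only.
 */
#include <stdint.h>

#define SDMA0_DISALLOWED_BIT 24	/* assumed base position, not the real one */

static int bit_to_engine(int posn)
{
	return posn - SDMA0_DISALLOWED_BIT;	/* valid only within the range */
}

static unsigned int popcount64(uint64_t x)	/* stand-in for hweight64() */
{
	unsigned int n = 0;

	while (x) {
		x &= x - 1;	/* clear the lowest set bit */
		n++;
	}
	return n;
}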
5752 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5753 * be done.
5755 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5757 struct sdma_vl_map *m;
5761 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5765 m = rcu_dereference(dd->sdma_map);
5766 vl = m->engine_to_vl[engine];
5773 * Translate the send context (software index) into a VL. Return -1 if the
5774 * translation cannot be done.
5776 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5778 struct send_context_info *sci;
5779 struct send_context *sc;
5782 sci = &dd->send_contexts[sw_index];
5784 /* there is no information for user (PSM) and ack contexts */
5785 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5791 if (dd->vld[15].sc == sc)
5793 for (i = 0; i < num_vls; i++)
5794 if (dd->vld[i].sc == sc)
5800 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5802 u64 reg_copy = reg, handled = 0;
5806 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5807 start_freeze_handling(dd->pport, 0);
5808 else if (is_ax(dd) &&
5809 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5810 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5811 start_freeze_handling(dd->pport, 0);
5814 int posn = fls64(reg_copy);
5815 /* fls64() returns a 1-based offset, we want it zero based */
5816 int shift = posn - 1;
5817 u64 mask = 1ULL << shift;
5819 if (port_inactive_err(shift)) {
5820 count_port_inactive(dd);
5822 } else if (disallowed_pkt_err(shift)) {
5823 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5825 handle_send_egress_err_info(dd, vl);
5834 dd_dev_info(dd, "Egress Error: %s\n",
5835 egress_err_status_string(buf, sizeof(buf), reg));
5837 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5838 if (reg & (1ull << i))
5839 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
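/*
 * Sketch (editor addition): handle_egress_err() walks its error bits from
 * the top using fls64(), which returns a 1-based index. A userspace
 * equivalent of that walk, using the GCC/Clang builtin:
 */
#include <stdint.h>

static int fls64_sketch(uint64_t v)	/* 1-based index of highest set bit */
{
	return v ? 64 - __builtin_clzll(v) : 0;
}

static void walk_bits_high_to_low(uint64_t reg, void (*handle)(int shift))
{
	while (reg) {
		int shift = fls64_sketch(reg) - 1;	/* make it 0-based */

		handle(shift);
		reg &= ~(1ull << shift);	/* done with this bit */
	}
}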
5843 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5848 dd_dev_info(dd, "Send Error: %s\n",
5849 send_err_status_string(buf, sizeof(buf), reg));
5851 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5852 if (reg & (1ull << i))
5853 incr_cntr64(&dd->send_err_status_cnt[i]);
5858 * The maximum number of times the error clear down will loop before
5859 * blocking a repeating error. This value is arbitrary.
5861 #define MAX_CLEAR_COUNT 20
5864 * Clear and handle an error register. All error interrupts are funneled
5865 * through here to have a central location to correctly handle single-
5866 * or multi-shot errors.
5868 * For non per-context registers, call this routine with a context value
5869 * of 0 so the per-context offset is zero.
5871 * If the handler loops too many times, assume that something is wrong
5872 * and can't be fixed, so mask the error bits.
5874 static void interrupt_clear_down(struct hfi1_devdata *dd,
5876 const struct err_reg_info *eri)
5881 /* read in a loop until no more errors are seen */
5884 reg = read_kctxt_csr(dd, context, eri->status);
5887 write_kctxt_csr(dd, context, eri->clear, reg);
5888 if (likely(eri->handler))
5889 eri->handler(dd, context, reg);
5891 if (count > MAX_CLEAR_COUNT) {
5894 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5897 * Read-modify-write so any other masked bits
5900 mask = read_kctxt_csr(dd, context, eri->mask);
5902 write_kctxt_csr(dd, context, eri->mask, mask);
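/*
 * Sketch (editor addition): the shape of interrupt_clear_down() in
 * isolation - read status, write-1-to-clear, dispatch, and mask off any
 * error that keeps reasserting past the limit. The callbacks model the
 * CSR accessors; hardware may set new bits between iterations, which is
 * exactly what the loop count guards against.
 */
#include <stdint.h>

#define MAX_CLEARS 20	/* arbitrary, mirrors MAX_CLEAR_COUNT above */

struct reg_ops {
	uint64_t (*read_status)(void *ctx);
	void (*clear_status)(void *ctx, uint64_t bits);
	uint64_t (*read_mask)(void *ctx);
	void (*write_mask)(void *ctx, uint64_t mask);
};

static void clear_down_sketch(const struct reg_ops *ops, void *ctx,
			      void (*handler)(void *ctx, uint64_t bits))
{
	int count = 0;

	for (;;) {
		uint64_t bits = ops->read_status(ctx);

		if (!bits)
			break;			/* clean - done */
		ops->clear_status(ctx, bits);	/* write-1-to-clear */
		if (handler)
			handler(ctx, bits);
		if (++count > MAX_CLEARS) {
			/* read-modify-write so other masked bits survive */
			ops->write_mask(ctx, ops->read_mask(ctx) & ~bits);
			break;
		}
	}
}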
5909 * CCE block "misc" interrupt. Source is < 16.
5911 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5913 const struct err_reg_info *eri = &misc_errs[source];
5916 interrupt_clear_down(dd, 0, eri);
5918 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5923 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5925 return flag_string(buf, buf_len, flags,
5926 sc_err_status_flags,
5927 ARRAY_SIZE(sc_err_status_flags));
5931 * Send context error interrupt. Source (hw_context) is < 160.
5933 * All send context errors cause the send context to halt. The normal
5934 * clear-down mechanism cannot be used because we cannot clear the
5935 * error bits until several other long-running items are done first.
5936 * This is OK because with the context halted, nothing else is going
5937 * to happen on it anyway.
5939 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5940 unsigned int hw_context)
5942 struct send_context_info *sci;
5943 struct send_context *sc;
5948 unsigned long irq_flags;
5950 sw_index = dd->hw_to_sw[hw_context];
5951 if (sw_index >= dd->num_send_contexts) {
5953 "out of range sw index %u for send context %u\n",
5954 sw_index, hw_context);
5957 sci = &dd->send_contexts[sw_index];
5958 spin_lock_irqsave(&dd->sc_lock, irq_flags);
5961 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5962 sw_index, hw_context);
5963 spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5967 /* tell the software that a halt has begun */
5968 sc_stop(sc, SCF_HALTED);
5970 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5972 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5973 send_context_err_status_string(flags, sizeof(flags),
5976 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5977 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5980 * Automatically restart halted kernel contexts out of interrupt
5981 * context. User contexts must ask the driver to restart the context.
5983 if (sc->type != SC_USER)
5984 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5985 spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5988 * Update the counters for the corresponding status bits.
5989 * Note that these particular counters are aggregated over all
5992 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5993 if (status & (1ull << i))
5994 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5998 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5999 unsigned int source, u64 status)
6001 struct sdma_engine *sde;
6004 sde = &dd->per_sdma[source];
6005 #ifdef CONFIG_SDMA_VERBOSITY
6006 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6007 slashstrip(__FILE__), __LINE__, __func__);
6008 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6009 sde->this_idx, source, (unsigned long long)status);
6012 sdma_engine_error(sde, status);
6015 * Update the counters for the corresponding status bits.
6016 * Note that these particular counters are aggregated over
6017 * all 16 DMA engines.
6019 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6020 if (status & (1ull << i))
6021 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6026 * CCE block SDMA error interrupt. Source is < 16.
6028 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6030 #ifdef CONFIG_SDMA_VERBOSITY
6031 struct sdma_engine *sde = &dd->per_sdma[source];
6033 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6034 slashstrip(__FILE__), __LINE__, __func__);
6035 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6037 sdma_dumpstate(sde);
6039 interrupt_clear_down(dd, source, &sdma_eng_err);
6043 * CCE block "various" interrupt. Source is < 8.
6045 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6047 const struct err_reg_info *eri = &various_err[source];
6050 * TCritInt cannot go through interrupt_clear_down()
6051 * because it is not a second tier interrupt. The handler
6052 * should be called directly.
6054 if (source == TCRIT_INT_SOURCE)
6055 handle_temp_err(dd);
6056 else if (eri->handler)
6057 interrupt_clear_down(dd, 0, eri);
6060 "%s: Unimplemented/reserved interrupt %d\n",
6064 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6066 /* src_ctx is always zero */
6067 struct hfi1_pportdata *ppd = dd->pport;
6068 unsigned long flags;
6069 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6071 if (reg & QSFP_HFI0_MODPRST_N) {
6072 if (!qsfp_mod_present(ppd)) {
6073 dd_dev_info(dd, "%s: QSFP module removed\n",
6076 ppd->driver_link_ready = 0;
6078 * Cable removed, reset all our information about the
6079 * cache and cable capabilities
6082 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6084 * We don't set cache_refresh_required here as we expect
6085 * an interrupt when a cable is inserted
6087 ppd->qsfp_info.cache_valid = 0;
6088 ppd->qsfp_info.reset_needed = 0;
6089 ppd->qsfp_info.limiting_active = 0;
6090 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6092 /* Invert the ModPresent pin now to detect plug-in */
6093 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6094 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6096 if ((ppd->offline_disabled_reason >
6098 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6099 (ppd->offline_disabled_reason ==
6100 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6101 ppd->offline_disabled_reason =
6103 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6105 if (ppd->host_link_state == HLS_DN_POLL) {
6107 * The link is still in POLL. This means
6108 * that the normal link down processing
6109 * will not happen. We have to do it here
6110 * before turning the DC off.
6112 queue_work(ppd->link_wq, &ppd->link_down_work);
6115 dd_dev_info(dd, "%s: QSFP module inserted\n",
6118 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6119 ppd->qsfp_info.cache_valid = 0;
6120 ppd->qsfp_info.cache_refresh_required = 1;
6121 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6125 * Stop inversion of ModPresent pin to detect
6126 * removal of the cable
6128 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6129 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6130 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6132 ppd->offline_disabled_reason =
6133 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6137 if (reg & QSFP_HFI0_INT_N) {
6138 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6140 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6141 ppd->qsfp_info.check_interrupt_flags = 1;
6142 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6145 /* Schedule the QSFP work only if there is a cable attached. */
6146 if (qsfp_mod_present(ppd))
6147 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
6150 static int request_host_lcb_access(struct hfi1_devdata *dd)
6154 ret = do_8051_command(dd, HCMD_MISC,
6155 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6156 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6157 if (ret != HCMD_SUCCESS) {
6158 dd_dev_err(dd, "%s: command failed with error %d\n",
6161 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6164 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6168 ret = do_8051_command(dd, HCMD_MISC,
6169 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6170 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6171 if (ret != HCMD_SUCCESS) {
6172 dd_dev_err(dd, "%s: command failed with error %d\n",
6175 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6179 * Set the LCB selector - allow host access. The DCC selector always
6180 * points to the host.
6182 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6184 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6185 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6186 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6190 * Clear the LCB selector - allow 8051 access. The DCC selector always
6191 * points to the host.
6193 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6195 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6196 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6200 * Acquire LCB access from the 8051. If the host already has access,
6201 * just increment a counter. Otherwise, inform the 8051 that the
6202 * host is taking access.
6206 * -EBUSY if the 8051 has control and cannot be disturbed
6207 * -errno if unable to acquire access from the 8051
6209 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6211 struct hfi1_pportdata *ppd = dd->pport;
6215 * Use the host link state lock so the operation of this routine
6216 * { link state check, selector change, count increment } can occur
6217 * as a unit against a link state change. Otherwise there is a
6218 * race between the state change and the count increment.
6221 mutex_lock(&ppd->hls_lock);
6223 while (!mutex_trylock(&ppd->hls_lock))
6227 /* this access is valid only when the link is up */
6228 if (ppd->host_link_state & HLS_DOWN) {
6229 dd_dev_info(dd, "%s: link state %s not up\n",
6230 __func__, link_state_name(ppd->host_link_state));
6235 if (dd->lcb_access_count == 0) {
6236 ret = request_host_lcb_access(dd);
6239 "%s: unable to acquire LCB access, err %d\n",
6243 set_host_lcb_access(dd);
6245 dd->lcb_access_count++;
6247 mutex_unlock(&ppd->hls_lock);
6252 * Release LCB access by decrementing the use count. If the count is moving
6253 * from 1 to 0, inform the 8051 that it has control back.
6257 * -errno if unable to release access to the 8051
6259 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6264 * Use the host link state lock because the acquire needed it.
6265 * Here, we only need to keep { selector change, count decrement }
6269 mutex_lock(&dd->pport->hls_lock);
6271 while (!mutex_trylock(&dd->pport->hls_lock))
6275 if (dd->lcb_access_count == 0) {
6276 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6281 if (dd->lcb_access_count == 1) {
6282 set_8051_lcb_access(dd);
6283 ret = request_8051_lcb_access(dd);
6286 "%s: unable to release LCB access, err %d\n",
6288 /* restore host access if the grant didn't work */
6289 set_host_lcb_access(dd);
6293 dd->lcb_access_count--;
6295 mutex_unlock(&dd->pport->hls_lock);
6300 * Initialize LCB access variables and state. Called during driver load,
6301 * after most of the initialization is finished.
6303 * The DC default is LCB access on for the host. The driver defaults to
6304 * leaving access to the 8051. Assign access now - this constrains the call
6305 * to this routine to be after all LCB set-up is done. In particular, after
6306 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6308 static void init_lcb_access(struct hfi1_devdata *dd)
6310 dd->lcb_access_count = 0;
6314 * Write a response back to an 8051 request.
6316 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6318 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6319 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6321 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6322 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
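/*
 * Sketch (editor addition): the response above packs a COMPLETED flag, a
 * return code, and 16 bits of data into a single CSR write. Field
 * positions below are assumed placeholders for the real
 * DC_DC8051_CFG_EXT_DEV_0_* shifts.
 */
#include <stdint.h>

#define RSP_COMPLETED	(1ull << 0)	/* placeholder bit position */
#define RSP_RC_SHIFT	8		/* placeholder shift */
#define RSP_DATA_SHIFT	16		/* placeholder shift */

static uint64_t pack_8051_response(uint8_t return_code, uint16_t rsp_data)
{
	return RSP_COMPLETED |
	       ((uint64_t)return_code << RSP_RC_SHIFT) |
	       ((uint64_t)rsp_data << RSP_DATA_SHIFT);
}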
6326 * Handle host requests from the 8051.
6328 static void handle_8051_request(struct hfi1_pportdata *ppd)
6330 struct hfi1_devdata *dd = ppd->dd;
6335 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6336 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6337 return; /* no request */
6339 /* zero out COMPLETED so the response is seen */
6340 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6342 /* extract request details */
6343 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6344 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6345 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6346 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6349 case HREQ_LOAD_CONFIG:
6350 case HREQ_SAVE_CONFIG:
6351 case HREQ_READ_CONFIG:
6352 case HREQ_SET_TX_EQ_ABS:
6353 case HREQ_SET_TX_EQ_REL:
6355 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6357 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6359 case HREQ_CONFIG_DONE:
6360 hreq_response(dd, HREQ_SUCCESS, 0);
6363 case HREQ_INTERFACE_TEST:
6364 hreq_response(dd, HREQ_SUCCESS, data);
6367 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6368 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6374 * Set up the allocation unit value.
6376 void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6378 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6380 /* do not modify other values in the register */
6381 reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6382 reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6383 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
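/*
 * Sketch (editor addition): set_up_vau() above and set_up_vl15() below
 * both use the classic read-modify-write field update - clear the field,
 * OR in the new value, write the register back - which generalizes to:
 */
#include <stdint.h>

static uint64_t csr_set_field(uint64_t reg, uint64_t field_mask,
			      unsigned int shift, uint64_t val)
{
	reg &= ~field_mask;			/* do not touch other fields */
	reg |= (val << shift) & field_mask;	/* install the new value */
	return reg;
}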
6387 * Set up initial VL15 credits of the remote. Assumes the rest of
6388 * the CM credit registers are zero from a previous global or credit reset.
6389 * Shared limit for VL15 will always be 0.
6391 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6393 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6395 /* set initial values for total and shared credit limit */
6396 reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6397 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6400 * Set total limit to be equal to VL15 credits.
6401 * Leave shared limit at 0.
6403 reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6404 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6406 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6407 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6411 * Zero all credit details from the previous connection and
6412 * reset the CM manager's internal counters.
6414 void reset_link_credits(struct hfi1_devdata *dd)
6418 /* remove all previous VL credit limits */
6419 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6420 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6421 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6422 write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6423 /* reset the CM block */
6424 pio_send_control(dd, PSC_CM_RESET);
6425 /* reset cached value */
6426 dd->vl15buf_cached = 0;
6429 /* convert a vCU to a CU */
6430 static u32 vcu_to_cu(u8 vcu)
6435 /* convert a CU to a vCU */
6436 static u8 cu_to_vcu(u32 cu)
6441 /* convert a vAU to an AU */
6442 static u32 vau_to_au(u8 vau)
6444 return 8 * (1 << vau);
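/*
 * Worked example (editor addition): the virtual units exchanged during
 * capability negotiation are log2-style codes. From vau_to_au() above,
 * AU = 8 * 2^vAU, so the hardwired vAU of 3 mentioned later means a
 * 64-byte allocation unit. The CU conversion bodies are elided in this
 * dump; CU = 2^vCU is the assumed analogous relationship.
 */
#include <stdint.h>

static uint32_t au_bytes(uint8_t vau) { return 8u * (1u << vau); } /* vau=3 -> 64 */
static uint32_t cu_units(uint8_t vcu) { return 1u << vcu; }        /* assumed */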
6447 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6449 ppd->sm_trap_qp = 0x0;
6454 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6456 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6460 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6461 write_csr(dd, DC_LCB_CFG_RUN, 0);
6462 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6463 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6464 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6465 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6466 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6467 reg = read_csr(dd, DCC_CFG_RESET);
6468 write_csr(dd, DCC_CFG_RESET, reg |
6469 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6470 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6471 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6473 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6474 write_csr(dd, DCC_CFG_RESET, reg);
6475 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6480 * This routine should be called after the link has been transitioned to
6481 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6484 * The expectation is that the caller of this routine would have taken
6485 * care of properly transitioning the link into the correct state.
6486 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6487 * before calling this function.
6489 static void _dc_shutdown(struct hfi1_devdata *dd)
6491 lockdep_assert_held(&dd->dc8051_lock);
6493 if (dd->dc_shutdown)
6496 dd->dc_shutdown = 1;
6497 /* Shutdown the LCB */
6498 lcb_shutdown(dd, 1);
6500 * Going to OFFLINE would have caused the 8051 to put the
6501 * SerDes into reset already. Just need to shut down the 8051,
6504 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6507 static void dc_shutdown(struct hfi1_devdata *dd)
6509 mutex_lock(&dd->dc8051_lock);
6511 mutex_unlock(&dd->dc8051_lock);
6515 * Calling this after the DC has been brought out of reset should not
6516 * do any damage.
6517 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6518 * before calling this function.
6520 static void _dc_start(struct hfi1_devdata *dd)
6522 lockdep_assert_held(&dd->dc8051_lock);
6524 if (!dd->dc_shutdown)
6527 /* Take the 8051 out of reset */
6528 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6529 /* Wait until 8051 is ready */
6530 if (wait_fm_ready(dd, TIMEOUT_8051_START))
6531 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6534 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6535 write_csr(dd, DCC_CFG_RESET, 0x10);
6536 /* lcb_shutdown() with abort=1 does not restore these */
6537 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6538 dd->dc_shutdown = 0;
6541 static void dc_start(struct hfi1_devdata *dd)
6543 mutex_lock(&dd->dc8051_lock);
6545 mutex_unlock(&dd->dc8051_lock);
6549 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6551 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6553 u64 rx_radr, tx_radr;
6556 if (dd->icode != ICODE_FPGA_EMULATION)
6560 * These LCB defaults on emulator _s are good, nothing to do here:
6561 * LCB_CFG_TX_FIFOS_RADR
6562 * LCB_CFG_RX_FIFOS_RADR
6564 * LCB_CFG_IGNORE_LOST_RCLK
6566 if (is_emulator_s(dd))
6568 /* else this is _p */
6570 version = emulator_rev(dd);
6572 version = 0x2d; /* all B0 use 0x2d or higher settings */
6574 if (version <= 0x12) {
6575 /* release 0x12 and below */
6578 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6579 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6580 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6583 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6584 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6585 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6587 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6588 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6590 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6591 } else if (version <= 0x18) {
6592 /* release 0x13 up to 0x18 */
6593 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6595 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6596 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6597 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6598 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6599 } else if (version == 0x19) {
6601 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6603 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6604 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6605 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6606 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6607 } else if (version == 0x1a) {
6609 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6611 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6612 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6613 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6614 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6615 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6617 /* release 0x1b and higher */
6618 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6620 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6621 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6622 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6623 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6626 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6627 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6628 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6629 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6630 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6634 * Handle an SMA idle message
6636 * This is a work-queue function outside of the interrupt.
6638 void handle_sma_message(struct work_struct *work)
6640 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6642 struct hfi1_devdata *dd = ppd->dd;
6647 * msg is bytes 1-4 of the 40-bit idle message - the command code
6650 ret = read_idle_sma(dd, &msg);
6653 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6655 * React to the SMA message. Byte[1] (0 for us) is the command.
6657 switch (msg & 0xff) {
6660 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6663 * Only expected in INIT or ARMED, discard otherwise.
6665 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6666 ppd->neighbor_normal = 1;
6668 case SMA_IDLE_ACTIVE:
6670 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6673 * Can activate the node. Discard otherwise.
6675 if (ppd->host_link_state == HLS_UP_ARMED &&
6676 ppd->is_active_optimize_enabled) {
6677 ppd->neighbor_normal = 1;
6678 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6682 "%s: received Active SMA idle message, couldn't set link to Active\n",
6688 "%s: received unexpected SMA idle message 0x%llx\n",
6694 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6697 unsigned long flags;
6699 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6700 rcvctrl = read_csr(dd, RCV_CTRL);
6703 write_csr(dd, RCV_CTRL, rcvctrl);
6704 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6707 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6709 adjust_rcvctrl(dd, add, 0);
6712 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6714 adjust_rcvctrl(dd, 0, clear);
6718 * Called from all interrupt handlers to start handling an SPC freeze.
6720 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6722 struct hfi1_devdata *dd = ppd->dd;
6723 struct send_context *sc;
6726 if (flags & FREEZE_SELF)
6727 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6729 /* enter frozen mode */
6730 dd->flags |= HFI1_FROZEN;
6732 /* notify all SDMA engines that they are going into a freeze */
6733 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6735 /* do halt pre-handling on all enabled send contexts */
6736 for (i = 0; i < dd->num_send_contexts; i++) {
6737 sc = dd->send_contexts[i].sc;
6738 if (sc && (sc->flags & SCF_ENABLED))
6739 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6742 /* Send contexts are frozen. Notify user space */
6743 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6745 if (flags & FREEZE_ABORT) {
6747 "Aborted freeze recovery. Please REBOOT system\n");
6750 /* queue non-interrupt handler */
6751 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6755 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6756 * depending on the "freeze" parameter.
6758 * No need to return an error if it times out, our only option
6759 * is to proceed anyway.
6761 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6763 unsigned long timeout;
6766 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6768 reg = read_csr(dd, CCE_STATUS);
6770 /* waiting until all indicators are set */
6771 if ((reg & ALL_FROZE) == ALL_FROZE)
6772 return; /* all done */
6774 /* waiting until all indicators are clear */
6775 if ((reg & ALL_FROZE) == 0)
6776 return; /* all done */
6779 if (time_after(jiffies, timeout)) {
6781 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6782 freeze ? "" : "un", reg & ALL_FROZE,
6783 freeze ? ALL_FROZE : 0ull);
6786 usleep_range(80, 120);
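/*
 * Sketch (editor addition): wait_for_freeze_status() is a poll-until-
 * deadline loop over the CCE_STATUS FROZE bits - wait for all set (or all
 * clear), give up after a timeout, and proceed either way. In portable
 * userspace terms:
 */
#include <stdint.h>
#include <time.h>

static int wait_bits(volatile const uint64_t *status, uint64_t all,
		     int want_set, unsigned int timeout_ms)
{
	struct timespec ts = { 0, 1000 * 1000 };	/* ~1 ms per poll */
	unsigned int waited_ms = 0;

	for (;;) {
		uint64_t v = *status & all;

		if (want_set ? (v == all) : (v == 0))
			return 0;		/* all indicators match */
		if (waited_ms++ >= timeout_ms)
			return -1;		/* timed out; caller proceeds */
		nanosleep(&ts, NULL);
	}
}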
6791 * Do all freeze handling for the RXE block.
6793 static void rxe_freeze(struct hfi1_devdata *dd)
6796 struct hfi1_ctxtdata *rcd;
6799 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6801 /* disable all receive contexts */
6802 for (i = 0; i < dd->num_rcv_contexts; i++) {
6803 rcd = hfi1_rcd_get_by_index(dd, i);
6804 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
6810 * Unfreeze handling for the RXE block - kernel contexts only.
6811 * This will also enable the port. User contexts will do unfreeze
6812 * handling on a per-context basis as they call into the driver.
6815 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6819 struct hfi1_ctxtdata *rcd;
6821 /* enable all kernel contexts */
6822 for (i = 0; i < dd->num_rcv_contexts; i++) {
6823 rcd = hfi1_rcd_get_by_index(dd, i);
6825 /* Ensure all non-user contexts (including vnic) are enabled */
6826 if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER)) {
6830 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6831 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6832 rcvmask |= rcd->rcvhdrtail_kvaddr ?
6833 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6834 hfi1_rcvctrl(dd, rcvmask, rcd);
6839 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6843 * Non-interrupt SPC freeze handling.
6845 * This is a work-queue function outside of the triggering interrupt.
6847 void handle_freeze(struct work_struct *work)
6849 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6851 struct hfi1_devdata *dd = ppd->dd;
6853 /* wait for freeze indicators on all affected blocks */
6854 wait_for_freeze_status(dd, 1);
6856 /* SPC is now frozen */
6858 /* do send PIO freeze steps */
6861 /* do send DMA freeze steps */
6864 /* do send egress freeze steps - nothing to do */
6866 /* do receive freeze steps */
6870 * Unfreeze the hardware - clear the freeze, wait for each
6871 * block's frozen bit to clear, then clear the frozen flag.
6873 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6874 wait_for_freeze_status(dd, 0);
6877 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6878 wait_for_freeze_status(dd, 1);
6879 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6880 wait_for_freeze_status(dd, 0);
6883 /* do send PIO unfreeze steps for kernel contexts */
6884 pio_kernel_unfreeze(dd);
6886 /* do send DMA unfreeze steps */
6889 /* do send egress unfreeze steps - nothing to do */
6891 /* do receive unfreeze steps for kernel contexts */
6892 rxe_kernel_unfreeze(dd);
6895 * The unfreeze procedure touches global device registers when
6896 * it disables and re-enables RXE. Mark the device unfrozen
6897 * after all that is done so other parts of the driver waiting
6898 * for the device to unfreeze don't do things out of order.
6900 * The above implies that the meaning of HFI1_FROZEN flag is
6901 * "Device has gone into freeze mode and freeze mode handling
6902 * is still in progress."
6904 * The flag will be removed when freeze mode processing has
6907 dd->flags &= ~HFI1_FROZEN;
6908 wake_up(&dd->event_queue);
6910 /* no longer frozen */
6914 * Handle a link up interrupt from the 8051.
6916 * This is a work-queue function outside of the interrupt.
6918 void handle_link_up(struct work_struct *work)
6920 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6922 struct hfi1_devdata *dd = ppd->dd;
6924 set_link_state(ppd, HLS_UP_INIT);
6926 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6929 * OPA specifies that certain counters are cleared on a transition
6930 * to link up, so do that.
6932 clear_linkup_counters(dd);
6934 * And (re)set link up default values.
6936 set_linkup_defaults(ppd);
6939 * Set VL15 credits. Use cached value from verify cap interrupt.
6940 * In case of quick linkup or simulator, vl15 value will be set by
6941 * handle_linkup_change. VerifyCap interrupt handler will not be
6942 * called in those scenarios.
6944 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
6945 set_up_vl15(dd, dd->vl15buf_cached);
6947 /* enforce link speed enabled */
6948 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6949 /* oops - current speed is not enabled, bounce */
6951 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6952 ppd->link_speed_active, ppd->link_speed_enabled);
6953 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6954 OPA_LINKDOWN_REASON_SPEED_POLICY);
6955 set_link_state(ppd, HLS_DN_OFFLINE);
6961 * Several pieces of LNI information were cached for SMA in ppd.
6962 * Reset these on link down
6964 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6966 ppd->neighbor_guid = 0;
6967 ppd->neighbor_port_number = 0;
6968 ppd->neighbor_type = 0;
6969 ppd->neighbor_fm_security = 0;
6972 static const char * const link_down_reason_strs[] = {
6973 [OPA_LINKDOWN_REASON_NONE] = "None",
6974 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6975 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6976 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6977 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6978 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6979 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6980 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6981 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6982 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6983 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6984 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6985 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6986 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6987 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6988 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6989 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6990 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6991 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6992 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6993 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6994 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6995 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6996 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6997 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6998 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6999 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
7000 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
7001 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
7002 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
7003 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
7004 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
7005 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
7006 "Excessive buffer overrun",
7007 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
7008 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
7009 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
7010 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
7011 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
7012 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
7013 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
7014 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
7015 "Local media not installed",
7016 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7017 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7018 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7019 "End to end not installed",
7020 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7021 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7022 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7023 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7024 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7025 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7028 /* return the neighbor link down reason string */
7029 static const char *link_down_reason_str(u8 reason)
7031 const char *str = NULL;
7033 if (reason < ARRAY_SIZE(link_down_reason_strs))
7034 str = link_down_reason_strs[reason];
7042 * Handle a link down interrupt from the 8051.
7044 * This is a work-queue function outside of the interrupt.
7046 void handle_link_down(struct work_struct *work)
7048 u8 lcl_reason, neigh_reason = 0;
7049 u8 link_down_reason;
7050 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7053 static const char ldr_str[] = "Link down reason: ";
7055 if ((ppd->host_link_state &
7056 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7057 ppd->port_type == PORT_TYPE_FIXED)
7058 ppd->offline_disabled_reason =
7059 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7061 /* Go offline first, then deal with reading/writing through 8051 */
7062 was_up = !!(ppd->host_link_state & HLS_UP);
7063 set_link_state(ppd, HLS_DN_OFFLINE);
7064 xchg(&ppd->is_link_down_queued, 0);
7068 /* link down reason is only valid if the link was up */
7069 read_link_down_reason(ppd->dd, &link_down_reason);
7070 switch (link_down_reason) {
7071 case LDR_LINK_TRANSFER_ACTIVE_LOW:
7072 /* the link went down, no idle message reason */
7073 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7076 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7078 * The neighbor reason is only valid if an idle message
7079 * was received for it.
7081 read_planned_down_reason_code(ppd->dd, &neigh_reason);
7082 dd_dev_info(ppd->dd,
7083 "%sNeighbor link down message %d, %s\n",
7084 ldr_str, neigh_reason,
7085 link_down_reason_str(neigh_reason));
7087 case LDR_RECEIVED_HOST_OFFLINE_REQ:
7088 dd_dev_info(ppd->dd,
7089 "%sHost requested link to go offline\n",
7093 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7094 ldr_str, link_down_reason);
7099 * If no reason, assume peer-initiated but missed
7100 * LinkGoingDown idle flits.
7102 if (neigh_reason == 0)
7103 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7105 /* went down while polling or going up */
7106 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7109 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7111 /* inform the SMA when the link transitions from up to down */
7112 if (was_up && ppd->local_link_down_reason.sma == 0 &&
7113 ppd->neigh_link_down_reason.sma == 0) {
7114 ppd->local_link_down_reason.sma =
7115 ppd->local_link_down_reason.latest;
7116 ppd->neigh_link_down_reason.sma =
7117 ppd->neigh_link_down_reason.latest;
7120 reset_neighbor_info(ppd);
7122 /* disable the port */
7123 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7126 * If there is no cable attached, turn the DC off. Otherwise,
7127 * start the link bring up.
7129 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7130 dc_shutdown(ppd->dd);
7135 void handle_link_bounce(struct work_struct *work)
7137 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7141 * Only do something if the link is currently up.
7143 if (ppd->host_link_state & HLS_UP) {
7144 set_link_state(ppd, HLS_DN_OFFLINE);
7147 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7148 __func__, link_state_name(ppd->host_link_state));
7153 * Mask conversion: Capability exchange to Port LTP. The capability
7154 * exchange has an implicit 16b CRC that is mandatory.
7156 static int cap_to_port_ltp(int cap)
7158 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7160 if (cap & CAP_CRC_14B)
7161 port_ltp |= PORT_LTP_CRC_MODE_14;
7162 if (cap & CAP_CRC_48B)
7163 port_ltp |= PORT_LTP_CRC_MODE_48;
7164 if (cap & CAP_CRC_12B_16B_PER_LANE)
7165 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7171 * Convert an OPA Port LTP mask to capability mask
7173 int port_ltp_to_cap(int port_ltp)
7177 if (port_ltp & PORT_LTP_CRC_MODE_14)
7178 cap_mask |= CAP_CRC_14B;
7179 if (port_ltp & PORT_LTP_CRC_MODE_48)
7180 cap_mask |= CAP_CRC_48B;
7181 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7182 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7188 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7190 static int lcb_to_port_ltp(int lcb_crc)
7194 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7195 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7196 else if (lcb_crc == LCB_CRC_48B)
7197 port_ltp = PORT_LTP_CRC_MODE_48;
7198 else if (lcb_crc == LCB_CRC_14B)
7199 port_ltp = PORT_LTP_CRC_MODE_14;
7201 port_ltp = PORT_LTP_CRC_MODE_16;
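/*
 * Worked example (editor addition): cap_to_port_ltp() and
 * port_ltp_to_cap() above are inverse bit translations, except that the
 * mandatory 16b CRC only exists on the LTP side. So a capability of
 * CAP_CRC_14B maps to LTP modes {16b, 14b}, and mapping back recovers
 * CAP_CRC_14B alone. With illustrative (non-driver) encodings:
 */
#define X_CAP_14B 0x1	/* illustrative values, not the real constants */
#define X_LTP_16  0x1
#define X_LTP_14  0x2

static int x_cap_to_ltp(int cap)
{
	int ltp = X_LTP_16;		/* 16b CRC is always present */

	if (cap & X_CAP_14B)
		ltp |= X_LTP_14;
	return ltp;
}

static int x_ltp_to_cap(int ltp)
{
	return (ltp & X_LTP_14) ? X_CAP_14B : 0;	/* 16b stays implicit */
}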
7207 * Our neighbor has indicated that we are allowed to act as a fabric
7208 * manager, so place the full management partition key in the second
7209 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
7210 * that we should already have the limited management partition key in
7211 * array element 1, and also that the port is not yet up when
7212 * add_full_mgmt_pkey() is invoked.
7214 static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7216 struct hfi1_devdata *dd = ppd->dd;
7218 /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7219 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7220 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7221 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
7222 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7223 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7224 hfi1_event_pkey_change(ppd->dd, ppd->port);
7227 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7229 if (ppd->pkeys[2] != 0) {
7231 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7232 hfi1_event_pkey_change(ppd->dd, ppd->port);
7237 * Convert the given link width to the OPA link width bitmask.
7239 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7244 * Simulator and quick linkup do not set the width.
7245 * Just set it to 4x without complaint.
7247 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7248 return OPA_LINK_WIDTH_4X;
7249 return 0; /* no lanes up */
7250 case 1: return OPA_LINK_WIDTH_1X;
7251 case 2: return OPA_LINK_WIDTH_2X;
7252 case 3: return OPA_LINK_WIDTH_3X;
7254 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7257 case 4: return OPA_LINK_WIDTH_4X;
7262 * Do a population count on the bottom nibble.
7264 static const u8 bit_counts[16] = {
7265 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7268 static inline u8 nibble_to_count(u8 nibble)
7270 return bit_counts[nibble & 0xf];
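/*
 * Worked example (editor addition): bit_counts[] is a 16-entry popcount
 * table for a single nibble, so enable_lane_tx = 0xb (binary 1011) yields
 * bit_counts[0xb] = 3 active lanes. The computed equivalent:
 */
#include <stdint.h>

static uint8_t nibble_popcount(uint8_t nibble)
{
	nibble &= 0xf;
	return (uint8_t)((nibble & 1) + ((nibble >> 1) & 1) +
			 ((nibble >> 2) & 1) + ((nibble >> 3) & 1));
}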
7274 * Read the active lane information from the 8051 registers and return
7277 * Active lane information is found in these 8051 registers:
7281 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7287 u8 tx_polarity_inversion;
7288 u8 rx_polarity_inversion;
7291 /* read the active lanes */
7292 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7293 &rx_polarity_inversion, &max_rate);
7294 read_local_lni(dd, &enable_lane_rx);
7296 /* convert to counts */
7297 tx = nibble_to_count(enable_lane_tx);
7298 rx = nibble_to_count(enable_lane_rx);
7301 * Set link_speed_active here, overriding what was set in
7302 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7303 * set the max_rate field in handle_verify_cap until v0.19.
7305 if ((dd->icode == ICODE_RTL_SILICON) &&
7306 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
7307 /* max_rate: 0 = 12.5G, 1 = 25G */
7310 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7314 "%s: unexpected max rate %d, using 25Gb\n",
7315 __func__, (int)max_rate);
7318 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7324 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7325 enable_lane_tx, tx, enable_lane_rx, rx);
7326 *tx_width = link_width_to_bits(dd, tx);
7327 *rx_width = link_width_to_bits(dd, rx);
7331 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7332 * Valid after the end of VerifyCap and during LinkUp. Does not change
7333 * after link up. I.e. look elsewhere for downgrade information.
7336 * + bits [7:4] contain the number of active transmitters
7337 * + bits [3:0] contain the number of active receivers
7338 * These are numbers 1 through 4 and can be different values if the
7339 * link is asymmetric.
7341 * verify_cap_local_fm_link_width[0] retains its original value.
7343 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7347 u8 misc_bits, local_flags;
7348 u16 active_tx, active_rx;
7350 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7352 rx = (widths >> 8) & 0xf;
7354 *tx_width = link_width_to_bits(dd, tx);
7355 *rx_width = link_width_to_bits(dd, rx);
7357 /* print the active widths */
7358 get_link_widths(dd, &active_tx, &active_rx);
7362 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7363 * hardware information when the link first comes up.
7365 * The link width is not available until after VerifyCap.AllFramesReceived
7366 * (the trigger for handle_verify_cap), so this is outside that routine
7367 * and should be called when the 8051 signals linkup.
7369 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7371 u16 tx_width, rx_width;
7373 /* get end-of-LNI link widths */
7374 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7376 /* use tx_width as the link is supposed to be symmetric on link up */
7377 ppd->link_width_active = tx_width;
7378 /* link width downgrade active (LWD.A) starts out matching LW.A */
7379 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7380 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7381 /* per OPA spec, on link up LWD.E resets to LWD.S */
7382 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7383 /* cache the active egress rate (units of 10^6 bits/sec) */
7384 ppd->current_egress_rate = active_egress_rate(ppd);
7388 * Handle a verify capabilities interrupt from the 8051.
7390 * This is a work-queue function outside of the interrupt.
7392 void handle_verify_cap(struct work_struct *work)
7394 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7396 struct hfi1_devdata *dd = ppd->dd;
7398 u8 power_management;
7408 u16 active_tx, active_rx;
7409 u8 partner_supported_crc;
7413 set_link_state(ppd, HLS_VERIFY_CAP);
7415 lcb_shutdown(dd, 0);
7416 adjust_lcb_for_fpga_serdes(dd);
7418 read_vc_remote_phy(dd, &power_management, &continuous);
7419 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7420 &partner_supported_crc);
7421 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7422 read_remote_device_id(dd, &device_id, &device_rev);
7424 * And the 'MgmtAllowed' information, which is exchanged during
7425 * LNI, is also available at this point.
7427 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7428 /* print the active widths */
7429 get_link_widths(dd, &active_tx, &active_rx);
7431 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7432 (int)power_management, (int)continuous);
7434 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7435 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7436 (int)partner_supported_crc);
7437 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7438 (u32)remote_tx_rate, (u32)link_widths);
7439 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7440 (u32)device_id, (u32)device_rev);
7442 * The peer vAU value just read is the peer receiver value. HFI does
7443 * not support a transmit vAU of 0 (AU == 8). We advertised that
7444 * with Z=1 in the fabric capabilities sent to the peer. The peer
7445 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7446 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7447 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7448 * subject to the Z value exception.
7452 set_up_vau(dd, vau);
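/*
 * Worked example (the AU sizes are taken from the comment above,
 * which implies AU = 8 * 2^vAU): a peer that advertised vAU 0 uses
 * 8-byte allocation units and, on seeing our Z=1, moves its receive
 * side to vAU 1, i.e. 16-byte units - the same move set_up_vau()
 * applies locally. The hardwired transmit vAU of 3 corresponds to
 * 64-byte units.
 */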
7455 * Set VL15 credits to 0 in global credit register. Cache remote VL15
7456 * credits value and wait for link-up interrupt to set it.
7459 dd->vl15buf_cached = vl15buf;
7461 /* set up the LCB CRC mode */
7462 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7464 /* order is important: use the lowest bit in common */
7465 if (crc_mask & CAP_CRC_14B)
7466 crc_val = LCB_CRC_14B;
7467 else if (crc_mask & CAP_CRC_48B)
7468 crc_val = LCB_CRC_48B;
7469 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7470 crc_val = LCB_CRC_12B_16B_PER_LANE;
7472 crc_val = LCB_CRC_16B;
7474 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7475 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7476 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
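/*
 * Example of the priority above: if both ends enable 14B and 48B
 * CRC, crc_mask has both bits set and LCB_CRC_14B wins because it
 * is tested first; LCB_CRC_16B is only the fallback when the two
 * ends have no CRC mode in common.
 */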
7478 /* set (14b only) or clear sideband credit */
7479 reg = read_csr(dd, SEND_CM_CTRL);
7480 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7481 write_csr(dd, SEND_CM_CTRL,
7482 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7484 write_csr(dd, SEND_CM_CTRL,
7485 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7488 ppd->link_speed_active = 0; /* invalid value */
7489 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7490 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7491 switch (remote_tx_rate) {
7492 case 0:
7493 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7494 break;
7495 case 1:
7496 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7497 break;
7498 }
7499 } else {
7500 /* actual rate is highest bit of the ANDed rates */
7501 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7503 if (rate & 2)
7504 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7505 else if (rate & 1)
7506 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7508 if (ppd->link_speed_active == 0) {
7509 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7510 __func__, (int)remote_tx_rate);
7511 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7515 * Cache the values of the supported, enabled, and active
7516 * LTP CRC modes to return in 'portinfo' queries. But the bit
7517 * flags that are returned in the portinfo query differ from
7518 * what's in the link_crc_mask, crc_sizes, and crc_val
7519 * variables. Convert these here.
7521 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7522 /* supported crc modes */
7523 ppd->port_ltp_crc_mode |=
7524 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7525 /* enabled crc modes */
7526 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7527 /* active crc mode */
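/*
 * The result packs the three views into one field: supported modes
 * in bits 11:8, enabled modes in bits 7:4, and the single active
 * mode in bits 3:0, matching the shifts used above.
 */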
7529 /* set up the remote credit return table */
7530 assign_remote_cm_au_table(dd, vcu);
7533 * The LCB is reset on entry to handle_verify_cap(), so this must
7534 * be applied on every link up.
7536 * Adjust LCB error kill enable to kill the link if
7537 * these RBUF errors are seen:
7538 * REPLAY_BUF_MBE_SMASK
7539 * FLIT_INPUT_BUF_MBE_SMASK
7541 if (is_ax(dd)) { /* fixed in B0 */
7542 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7543 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7544 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7545 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7548 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7549 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7551 /* give 8051 access to the LCB CSRs */
7552 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7553 set_8051_lcb_access(dd);
7555 if (ppd->mgmt_allowed)
7556 add_full_mgmt_pkey(ppd);
7558 /* tell the 8051 to go to LinkUp */
7559 set_link_state(ppd, HLS_GOING_UP);
7563 * Apply the link width downgrade enabled policy against the current active
7566 * Called when the enabled policy changes or the active link widths change.
7568 void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7575 /* use the hls lock to avoid a race with actual link up */
7578 mutex_lock(&ppd->hls_lock);
7579 /* only apply if the link is up */
7580 if (ppd->host_link_state & HLS_DOWN) {
7581 /* still going up... wait and retry */
7582 if (ppd->host_link_state & HLS_GOING_UP) {
7583 if (++tries < 1000) {
7584 mutex_unlock(&ppd->hls_lock);
7585 usleep_range(100, 120); /* arbitrary */
7589 "%s: giving up waiting for link state change\n",
7595 lwde = ppd->link_width_downgrade_enabled;
7597 if (refresh_widths) {
7598 get_link_widths(ppd->dd, &tx, &rx);
7599 ppd->link_width_downgrade_tx_active = tx;
7600 ppd->link_width_downgrade_rx_active = rx;
7603 if (ppd->link_width_downgrade_tx_active == 0 ||
7604 ppd->link_width_downgrade_rx_active == 0) {
7605 /* the 8051 reported a dead link as a downgrade */
7606 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7607 } else if (lwde == 0) {
7608 /* downgrade is disabled */
7610 /* bounce if not at starting active width */
7611 if ((ppd->link_width_active !=
7612 ppd->link_width_downgrade_tx_active) ||
7613 (ppd->link_width_active !=
7614 ppd->link_width_downgrade_rx_active)) {
7616 "Link downgrade is disabled and link has downgraded, downing link\n");
7618 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7619 ppd->link_width_active,
7620 ppd->link_width_downgrade_tx_active,
7621 ppd->link_width_downgrade_rx_active);
7624 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7625 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7626 /* Tx or Rx is outside the enabled policy */
7628 "Link is outside of downgrade allowed, downing link\n");
7630 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7631 lwde, ppd->link_width_downgrade_tx_active,
7632 ppd->link_width_downgrade_rx_active);
7637 mutex_unlock(&ppd->hls_lock);
7640 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7641 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7642 set_link_state(ppd, HLS_DN_OFFLINE);
7648 * Handle a link downgrade interrupt from the 8051.
7650 * This is a work-queue function outside of the interrupt.
7652 void handle_link_downgrade(struct work_struct *work)
7654 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7655 link_downgrade_work);
7657 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7658 apply_link_downgrade_policy(ppd, 1);
7661 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7663 return flag_string(buf, buf_len, flags, dcc_err_flags,
7664 ARRAY_SIZE(dcc_err_flags));
7667 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7669 return flag_string(buf, buf_len, flags, lcb_err_flags,
7670 ARRAY_SIZE(lcb_err_flags));
7673 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7675 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7676 ARRAY_SIZE(dc8051_err_flags));
7679 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7681 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7682 ARRAY_SIZE(dc8051_info_err_flags));
7685 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7687 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7688 ARRAY_SIZE(dc8051_info_host_msg_flags));
7691 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7693 struct hfi1_pportdata *ppd = dd->pport;
7694 u64 info, err, host_msg;
7695 int queue_link_down = 0;
7698 /* look at the flags */
7699 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7700 /* 8051 information set by firmware */
7701 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7702 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7703 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7704 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7706 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7707 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7710 * Handle error flags.
7712 if (err & FAILED_LNI) {
7714 * LNI error indications are cleared by the 8051
7715 * only when starting polling. Only pay attention
7716 * to them when in the states that occur during
7719 if (ppd->host_link_state
7720 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7721 queue_link_down = 1;
7722 dd_dev_info(dd, "Link error: %s\n",
7723 dc8051_info_err_string(buf,
7728 err &= ~(u64)FAILED_LNI;
7730 /* unknown frames can happen during LNI, just count */
7731 if (err & UNKNOWN_FRAME) {
7732 ppd->unknown_frame_count++;
7733 err &= ~(u64)UNKNOWN_FRAME;
7736 /* report remaining errors, but do not do anything */
7737 dd_dev_err(dd, "8051 info error: %s\n",
7738 dc8051_info_err_string(buf, sizeof(buf),
7743 * Handle host message flags.
7745 if (host_msg & HOST_REQ_DONE) {
7747 * Presently, the driver does a busy wait for
7748 * host requests to complete. This is only an
7749 * informational message.
7750 * NOTE: The 8051 clears the host message
7751 * information *on the next 8051 command*.
7752 * Therefore, when linkup is achieved,
7753 * this flag will still be set.
7755 host_msg &= ~(u64)HOST_REQ_DONE;
7757 if (host_msg & BC_SMA_MSG) {
7758 queue_work(ppd->link_wq, &ppd->sma_message_work);
7759 host_msg &= ~(u64)BC_SMA_MSG;
7761 if (host_msg & LINKUP_ACHIEVED) {
7762 dd_dev_info(dd, "8051: Link up\n");
7763 queue_work(ppd->link_wq, &ppd->link_up_work);
7764 host_msg &= ~(u64)LINKUP_ACHIEVED;
7766 if (host_msg & EXT_DEVICE_CFG_REQ) {
7767 handle_8051_request(ppd);
7768 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7770 if (host_msg & VERIFY_CAP_FRAME) {
7771 queue_work(ppd->link_wq, &ppd->link_vc_work);
7772 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7774 if (host_msg & LINK_GOING_DOWN) {
7775 const char *extra = "";
7776 /* no downgrade action needed if going down */
7777 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7778 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7779 extra = " (ignoring downgrade)";
7781 dd_dev_info(dd, "8051: Link down%s\n", extra);
7782 queue_link_down = 1;
7783 host_msg &= ~(u64)LINK_GOING_DOWN;
7785 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7786 queue_work(ppd->link_wq, &ppd->link_downgrade_work);
7787 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7790 /* report remaining messages, but do not do anything */
7791 dd_dev_info(dd, "8051 info host message: %s\n",
7792 dc8051_info_host_msg_string(buf,
7797 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7799 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7801 * Lost the 8051 heartbeat. If this happens, we
7802 * receive constant interrupts about it. Disable
7803 * the interrupt after the first.
7805 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7806 write_csr(dd, DC_DC8051_ERR_EN,
7807 read_csr(dd, DC_DC8051_ERR_EN) &
7808 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7810 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7813 /* report the error, but do not do anything */
7814 dd_dev_err(dd, "8051 error: %s\n",
7815 dc8051_err_string(buf, sizeof(buf), reg));
7818 if (queue_link_down) {
7820 * if the link is already going down or disabled, do not
7821 * queue another. If there's a link down entry already
7822 * queued, don't queue another one.
7824 if ((ppd->host_link_state &
7825 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7826 ppd->link_enabled == 0) {
7827 dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
7828 __func__, ppd->host_link_state,
7831 if (xchg(&ppd->is_link_down_queued, 1) == 1)
7832 dd_dev_info(dd,
7833 "%s: link down request already queued\n",
7834 __func__);
7835 else
7836 queue_work(ppd->link_wq, &ppd->link_down_work);
7841 static const char * const fm_config_txt[] = {
7843 "BadHeadDist: Distance violation between two head flits",
7845 "BadTailDist: Distance violation between two tail flits",
7847 "BadCtrlDist: Distance violation between two credit control flits",
7849 "BadCrdAck: Credits return for unsupported VL",
7851 "UnsupportedVLMarker: Received VL Marker",
7853 "BadPreempt: Exceeded the preemption nesting level",
7855 "BadControlFlit: Received unsupported control flit",
7858 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7861 static const char * const port_rcv_txt[] = {
7863 "BadPktLen: Illegal PktLen",
7865 "PktLenTooLong: Packet longer than PktLen",
7867 "PktLenTooShort: Packet shorter than PktLen",
7869 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7871 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7873 "BadL2: Illegal L2 opcode",
7875 "BadSC: Unsupported SC",
7877 "BadRC: Illegal RC",
7879 "PreemptError: Preempting with same VL",
7881 "PreemptVL15: Preempting a VL15 packet",
7884 #define OPA_LDR_FMCONFIG_OFFSET 16
7885 #define OPA_LDR_PORTRCV_OFFSET 0
7886 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7888 u64 info, hdr0, hdr1;
7891 struct hfi1_pportdata *ppd = dd->pport;
7895 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7896 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7897 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7898 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7899 /* set status bit */
7900 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7902 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7905 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7906 struct hfi1_pportdata *ppd = dd->pport;
7907 /* this counter saturates at (2^32) - 1 */
7908 if (ppd->link_downed < (u32)UINT_MAX)
7909 ppd->link_downed++;
7910 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7913 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7914 u8 reason_valid = 1;
7916 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7917 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7918 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7919 /* set status bit */
7920 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7930 extra = fm_config_txt[info];
7933 extra = fm_config_txt[info];
7934 if (ppd->port_error_action &
7935 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7938 * lcl_reason cannot be derived from info
7942 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7947 snprintf(buf, sizeof(buf), "reserved%lld", info);
7952 if (reason_valid && !do_bounce) {
7953 do_bounce = ppd->port_error_action &
7954 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7955 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7958 /* just report this */
7959 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
7961 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7964 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7965 u8 reason_valid = 1;
7967 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7968 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7969 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7970 if (!(dd->err_info_rcvport.status_and_code &
7971 OPA_EI_STATUS_SMASK)) {
7972 dd->err_info_rcvport.status_and_code =
7973 info & OPA_EI_CODE_SMASK;
7974 /* set status bit */
7975 dd->err_info_rcvport.status_and_code |=
7976 OPA_EI_STATUS_SMASK;
7978 * save first 2 flits in the packet that caused
7981 dd->err_info_rcvport.packet_flit1 = hdr0;
7982 dd->err_info_rcvport.packet_flit2 = hdr1;
7995 extra = port_rcv_txt[info];
7999 snprintf(buf, sizeof(buf), "reserved%lld", info);
8004 if (reason_valid && !do_bounce) {
8005 do_bounce = ppd->port_error_action &
8006 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
8007 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
8010 /* just report this */
8011 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
8012 " hdr0 0x%llx, hdr1 0x%llx\n",
8015 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8018 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8019 /* informative only */
8020 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
8021 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8023 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8024 /* informative only */
8025 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
8026 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8029 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8030 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8032 /* report any remaining errors */
8034 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8035 dcc_err_string(buf, sizeof(buf), reg));
8037 if (lcl_reason == 0)
8038 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8041 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8043 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
8044 queue_work(ppd->link_wq, &ppd->link_bounce_work);
8048 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8052 dd_dev_info(dd, "LCB Error: %s\n",
8053 lcb_err_string(buf, sizeof(buf), reg));
8057 * CCE block DC interrupt. Source is < 8.
8059 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8061 const struct err_reg_info *eri = &dc_errs[source];
8064 interrupt_clear_down(dd, 0, eri);
8065 } else if (source == 3 /* dc_lbm_int */) {
8067 * This indicates that a parity error has occurred on the
8068 * address/control lines presented to the LBM. The error
8069 * is a single pulse, there is no associated error flag,
8070 * and it is non-maskable. This is because if a parity
8071 * error occurs on the request, the request is dropped.
8072 * This should never occur, but it is nice to know if it
8073 * ever does.
8075 dd_dev_err(dd, "Parity error in DC LBM block\n");
8077 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8082 * TX block send credit interrupt. Source is < 160.
8084 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8086 sc_group_release_update(dd, source);
8090 * TX block SDMA interrupt. Source is < 48.
8092 * SDMA interrupts are grouped by type:
8094 * 0 - N-1 = SDma
8095 * N - 2N-1 = SDmaProgress
8096 * 2N - 3N-1 = SDmaIdle
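 *
 * For example, with N engines a source of N + 3 decodes below to
 * what = 1 (SDmaProgress) and which = 3, i.e. the progress
 * interrupt for engine 3.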
8098 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8100 /* what interrupt */
8101 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
8103 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8105 #ifdef CONFIG_SDMA_VERBOSITY
8106 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8107 slashstrip(__FILE__), __LINE__, __func__);
8108 sdma_dumpstate(&dd->per_sdma[which]);
8111 if (likely(what < 3 && which < dd->num_sdma)) {
8112 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8114 /* should not happen */
8115 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8120 * RX block receive available interrupt. Source is < 160.
8122 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8124 struct hfi1_ctxtdata *rcd;
8127 if (likely(source < dd->num_rcv_contexts)) {
8128 rcd = hfi1_rcd_get_by_index(dd, source);
8130 /* Check for non-user contexts, including vnic */
8131 if ((source < dd->first_dyn_alloc_ctxt) ||
8132 (rcd->sc && (rcd->sc->type == SC_KERNEL)))
8133 rcd->do_interrupt(rcd, 0);
8135 handle_user_interrupt(rcd);
8140 /* received an interrupt, but no rcd */
8141 err_detail = "dataless";
8143 /* received an interrupt, but are not using that context */
8144 err_detail = "out of range";
8146 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8147 err_detail, source);
8151 * RX block receive urgent interrupt. Source is < 160.
8153 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8155 struct hfi1_ctxtdata *rcd;
8158 if (likely(source < dd->num_rcv_contexts)) {
8159 rcd = hfi1_rcd_get_by_index(dd, source);
8161 /* only pay attention to user urgent interrupts */
8162 if ((source >= dd->first_dyn_alloc_ctxt) &&
8163 (!rcd->sc || (rcd->sc->type == SC_USER)))
8164 handle_user_interrupt(rcd);
8169 /* received an interrupt, but no rcd */
8170 err_detail = "dataless";
8172 /* received an interrupt, but are not using that context */
8173 err_detail = "out of range";
8175 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8176 err_detail, source);
8180 * Reserved range interrupt. Should not be called in normal operation.
8182 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8186 dd_dev_err(dd, "unexpected %s interrupt\n",
8187 is_reserved_name(name, sizeof(name), source));
8190 static const struct is_table is_table[] = {
8193 * name func interrupt func
8195 { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8196 is_misc_err_name, is_misc_err_int },
8197 { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8198 is_sdma_eng_err_name, is_sdma_eng_err_int },
8199 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8200 is_sendctxt_err_name, is_sendctxt_err_int },
8201 { IS_SDMA_START, IS_SDMA_END,
8202 is_sdma_eng_name, is_sdma_eng_int },
8203 { IS_VARIOUS_START, IS_VARIOUS_END,
8204 is_various_name, is_various_int },
8205 { IS_DC_START, IS_DC_END,
8206 is_dc_name, is_dc_int },
8207 { IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8208 is_rcv_avail_name, is_rcv_avail_int },
8209 { IS_RCVURGENT_START, IS_RCVURGENT_END,
8210 is_rcv_urgent_name, is_rcv_urgent_int },
8211 { IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8212 is_send_credit_name, is_send_credit_int},
8213 { IS_RESERVED_START, IS_RESERVED_END,
8214 is_reserved_name, is_reserved_int},
8218 * Interrupt source interrupt - called when the given source has an interrupt.
8219 * Source is a bit index into an array of 64-bit integers.
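 *
 * For example, bit 3 of the third 64-bit status CSR arrives here as
 * source = 2 * 64 + 3 = 131.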
8221 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8223 const struct is_table *entry;
8225 /* avoids a double compare by walking the table in-order */
8226 for (entry = &is_table[0]; entry->is_name; entry++) {
8227 if (source < entry->end) {
8228 trace_hfi1_interrupt(dd, entry, source);
8229 entry->is_int(dd, source - entry->start);
8233 /* fell off the end */
8234 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8238 * General interrupt handler. This is able to correctly handle
8239 * all interrupts in case INTx is used.
8241 static irqreturn_t general_interrupt(int irq, void *data)
8243 struct hfi1_devdata *dd = data;
8244 u64 regs[CCE_NUM_INT_CSRS];
8247 irqreturn_t handled = IRQ_NONE;
8249 this_cpu_inc(*dd->int_counter);
8251 /* phase 1: scan and clear all handled interrupts */
8252 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8253 if (dd->gi_mask[i] == 0) {
8254 regs[i] = 0; /* used later */
8257 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8259 /* only clear if anything is set */
8261 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8264 /* phase 2: call the appropriate handler */
8265 for_each_set_bit(bit, (unsigned long *)&regs[0],
8266 CCE_NUM_INT_CSRS * 64) {
8267 is_interrupt(dd, bit);
8268 handled = IRQ_HANDLED;
8274 static irqreturn_t sdma_interrupt(int irq, void *data)
8276 struct sdma_engine *sde = data;
8277 struct hfi1_devdata *dd = sde->dd;
8280 #ifdef CONFIG_SDMA_VERBOSITY
8281 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8282 slashstrip(__FILE__), __LINE__, __func__);
8283 sdma_dumpstate(sde);
8286 this_cpu_inc(*dd->int_counter);
8288 /* This read_csr is really bad in the hot path */
8289 status = read_csr(dd,
8290 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8292 if (likely(status)) {
8293 /* clear the interrupt(s) */
8295 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8298 /* handle the interrupt(s) */
8299 sdma_engine_interrupt(sde, status);
8301 dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
8308 * Clear the receive interrupt. Use a read of the interrupt clear CSR
8309 * to ensure that the write completed. This does NOT guarantee that
8310 * queued DMA writes to memory from the chip are pushed.
8312 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8314 struct hfi1_devdata *dd = rcd->dd;
8315 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8317 mmiowb(); /* make sure everything before is written */
8318 write_csr(dd, addr, rcd->imask);
8319 /* force the above write on the chip and get a value back */
8320 (void)read_csr(dd, addr);
8323 /* force the receive interrupt */
8324 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8326 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8330 * Return non-zero if a packet is present.
8332 * This routine is called when rechecking for packets after the RcvAvail
8333 * interrupt has been cleared down. First, do a quick check of memory for
8334 * a packet present. If not found, use an expensive CSR read of the context
8335 * tail to determine the actual tail. The CSR read is necessary because there
8336 * is no method to push pending DMAs to memory other than an interrupt and we
8337 * are trying to determine if we need to force an interrupt.
8339 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8344 if (!rcd->rcvhdrtail_kvaddr)
8345 present = (rcd->seq_cnt ==
8346 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8347 else /* is RDMA rtail */
8348 present = (rcd->head != get_rcvhdrtail(rcd));
8353 /* fall back to a CSR read, correct independent of DMA_RTAIL */
8354 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8355 return rcd->head != tail;
8359 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8360 * This routine will try to handle packets immediately (latency), but if
8361 * it finds too many, it will invoke the thread handler (bandwidth). The
8362 * chip receive interrupt is *not* cleared down until this or the thread (if
8363 * invoked) is finished. The intent is to avoid extra interrupts while we
8364 * are processing packets anyway.
8366 static irqreturn_t receive_context_interrupt(int irq, void *data)
8368 struct hfi1_ctxtdata *rcd = data;
8369 struct hfi1_devdata *dd = rcd->dd;
8373 trace_hfi1_receive_interrupt(dd, rcd);
8374 this_cpu_inc(*dd->int_counter);
8375 aspm_ctx_disable(rcd);
8377 /* receive interrupt remains blocked while processing packets */
8378 disposition = rcd->do_interrupt(rcd, 0);
8381 * Too many packets were seen while processing packets in this
8382 * IRQ handler. Invoke the handler thread. The receive interrupt
8385 if (disposition == RCV_PKT_LIMIT)
8386 return IRQ_WAKE_THREAD;
8389 * The packet processor detected no more packets. Clear the receive
8390 * interrupt and recheck for a packet that may have arrived
8391 * after the previous check and interrupt clear. If a packet arrived,
8392 * force another interrupt.
8394 clear_recv_intr(rcd);
8395 present = check_packet_present(rcd);
8397 force_recv_intr(rcd);
8403 * Receive packet thread handler. This expects to be invoked with the
8404 * receive interrupt still blocked.
8406 static irqreturn_t receive_context_thread(int irq, void *data)
8408 struct hfi1_ctxtdata *rcd = data;
8411 /* receive interrupt is still blocked from the IRQ handler */
8412 (void)rcd->do_interrupt(rcd, 1);
8415 * The packet processor will only return if it detected no more
8416 * packets. Hold IRQs here so we can safely clear the interrupt and
8417 * recheck for a packet that may have arrived after the previous
8418 * check and the interrupt clear. If a packet arrived, force another
8421 local_irq_disable();
8422 clear_recv_intr(rcd);
8423 present = check_packet_present(rcd);
8425 force_recv_intr(rcd);
8431 /* ========================================================================= */
8433 u32 read_physical_state(struct hfi1_devdata *dd)
8437 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8438 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8439 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8442 u32 read_logical_state(struct hfi1_devdata *dd)
8446 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8447 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8448 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8451 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8455 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8456 /* clear current state, set new state */
8457 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8458 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8459 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8463 * Use the 8051 to read an LCB CSR.
8465 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8470 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8471 if (acquire_lcb_access(dd, 0) == 0) {
8472 *data = read_csr(dd, addr);
8473 release_lcb_access(dd, 0);
8479 /* register is an index of LCB registers: (offset - base) / 8 */
8480 regno = (addr - DC_LCB_CFG_RUN) >> 3;
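/* e.g. an LCB CSR 0x18 bytes above DC_LCB_CFG_RUN yields regno 3 */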
8481 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8482 if (ret != HCMD_SUCCESS)
8488 * Provide a cache for some of the LCB registers in case the LCB is
8489 * unavailable.
8490 * (The LCB is unavailable in certain link states, for example.)
8497 static struct lcb_datum lcb_cache[] = {
8498 { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8499 { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8500 { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8503 static void update_lcb_cache(struct hfi1_devdata *dd)
8509 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8510 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8512 /* Update if we get good data */
8513 if (likely(ret != -EBUSY))
8514 lcb_cache[i].val = val;
8518 static int read_lcb_cache(u32 off, u64 *val)
8522 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8523 if (lcb_cache[i].off == off) {
8524 *val = lcb_cache[i].val;
8529 pr_warn("%s bad offset 0x%x\n", __func__, off);
8534 * Read an LCB CSR. Access may not be in host control, so check.
8535 * Return 0 on success, -EBUSY on failure.
8537 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8539 struct hfi1_pportdata *ppd = dd->pport;
8541 /* if up, go through the 8051 for the value */
8542 if (ppd->host_link_state & HLS_UP)
8543 return read_lcb_via_8051(dd, addr, data);
8544 /* if going up or down, check the cache, otherwise, no access */
8545 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8546 if (read_lcb_cache(addr, data))
8551 /* otherwise, host has access */
8552 *data = read_csr(dd, addr);
8557 * Use the 8051 to write an LCB CSR.
8559 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8564 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8565 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8566 if (acquire_lcb_access(dd, 0) == 0) {
8567 write_csr(dd, addr, data);
8568 release_lcb_access(dd, 0);
8574 /* register is an index of LCB registers: (offset - base) / 8 */
8575 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8576 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8577 if (ret != HCMD_SUCCESS)
8583 * Write an LCB CSR. Access may not be in host control, so check.
8584 * Return 0 on success, -EBUSY on failure.
8586 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8588 struct hfi1_pportdata *ppd = dd->pport;
8590 /* if up, go through the 8051 for the value */
8591 if (ppd->host_link_state & HLS_UP)
8592 return write_lcb_via_8051(dd, addr, data);
8593 /* if going up or down, no access */
8594 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8596 /* otherwise, host has access */
8597 write_csr(dd, addr, data);
8603 * < 0 = Linux error, not able to get access
8604 * > 0 = 8051 command RETURN_CODE
8606 static int do_8051_command(
8607 struct hfi1_devdata *dd,
8614 unsigned long timeout;
8616 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8618 mutex_lock(&dd->dc8051_lock);
8620 /* We can't send any commands to the 8051 if it's in reset */
8621 if (dd->dc_shutdown) {
8622 return_code = -ENODEV;
8627 * If an 8051 host command timed out previously, then the 8051 is
8630 * On first timeout, attempt to reset and restart the entire DC
8631 * block (including 8051). (Is this too big of a hammer?)
8633 * If the 8051 times out a second time, the reset did not bring it
8634 * back to healthy life. In that case, fail any subsequent commands.
8636 if (dd->dc8051_timed_out) {
8637 if (dd->dc8051_timed_out > 1) {
8639 "Previous 8051 host command timed out, skipping command %u\n",
8641 return_code = -ENXIO;
8649 * If there is no timeout, then the 8051 command interface is
8650 * waiting for a command.
8654 * When writing an LCB CSR, out_data contains the full value
8655 * to be written, while in_data contains the relative LCB
8656 * address in 7:0. Do the work here, rather than in the caller,
8657 * of distributing the write data to where it needs to go:
8660 * 39:00 -> in_data[47:8]
8661 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8662 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
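 *
 * Worked example: out_data = 0xAABBCC0123456789 distributes as
 * in_data[47:8] = 0x0123456789, RETURN_CODE = 0xCC, and
 * RSP_DATA = 0xAABB.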
8664 if (type == HCMD_WRITE_LCB_CSR) {
8665 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8666 /* must preserve COMPLETED - it is tied to hardware */
8667 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8668 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8669 reg |= ((((*out_data) >> 40) & 0xff) <<
8670 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8671 | ((((*out_data) >> 48) & 0xffff) <<
8672 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8673 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8677 * Do two writes: the first to stabilize the type and req_data, the
8678 * second to activate.
8680 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8681 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8682 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8683 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8684 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8685 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8686 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8688 /* wait for completion, alternate: interrupt */
8689 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8691 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8692 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8695 if (time_after(jiffies, timeout)) {
8696 dd->dc8051_timed_out++;
8697 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8700 return_code = -ETIMEDOUT;
8707 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8708 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8709 if (type == HCMD_READ_LCB_CSR) {
8710 /* top 16 bits are in a different register */
8711 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8712 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8714 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8717 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8718 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8719 dd->dc8051_timed_out = 0;
8721 * Clear command for next user.
8723 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8726 mutex_unlock(&dd->dc8051_lock);
8730 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8732 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8735 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8736 u8 lane_id, u32 config_data)
8741 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8742 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8743 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8744 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8745 if (ret != HCMD_SUCCESS) {
8747 "load 8051 config: field id %d, lane %d, err %d\n",
8748 (int)field_id, (int)lane_id, ret);
8754 * Read the 8051 firmware "registers". Use the RAM directly. Always
8755 * set the result, even on error.
8756 * Return 0 on success, -errno on failure
8758 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8765 /* address start depends on the lane_id */
8767 addr = (4 * NUM_GENERAL_FIELDS)
8768 + (lane_id * 4 * NUM_LANE_FIELDS);
8771 addr += field_id * 4;
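/*
 * e.g. (illustrative values) lane 1, field 2 gives
 * addr = 4 * NUM_GENERAL_FIELDS + 1 * (4 * NUM_LANE_FIELDS) + 2 * 4
 */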
8773 /* read is in 8-byte chunks, hardware will truncate the address down */
8774 ret = read_8051_data(dd, addr, 8, &big_data);
8777 /* extract the 4 bytes we want */
8778 if (addr & 0x4)
8779 *result = (u32)(big_data >> 32);
8780 else
8781 *result = (u32)big_data;
8784 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8785 __func__, lane_id, field_id);
8791 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8796 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8797 | power_management << POWER_MANAGEMENT_SHIFT;
8798 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8799 GENERAL_CONFIG, frame);
8802 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8803 u16 vl15buf, u8 crc_sizes)
8807 frame = (u32)vau << VAU_SHIFT
8809 | (u32)vcu << VCU_SHIFT
8810 | (u32)vl15buf << VL15BUF_SHIFT
8811 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8812 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8813 GENERAL_CONFIG, frame);
8816 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8817 u8 *flag_bits, u16 *link_widths)
8821 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8823 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8824 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8825 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8828 static int write_vc_local_link_width(struct hfi1_devdata *dd,
8835 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8836 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8837 | (u32)link_widths << LINK_WIDTH_SHIFT;
8838 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8842 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8847 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8848 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8849 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8852 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8857 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8858 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8859 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8860 & REMOTE_DEVICE_REV_MASK;
8863 int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8868 mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8869 read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8870 /* Clear, then set field */
8872 frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8873 return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8877 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8882 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8883 *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8884 STS_FM_VERSION_MAJOR_MASK;
8885 *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8886 STS_FM_VERSION_MINOR_MASK;
8888 read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8889 *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8890 STS_FM_VERSION_PATCH_MASK;
8893 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8898 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8899 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8900 & POWER_MANAGEMENT_MASK;
8901 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8902 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8905 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8906 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8910 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8911 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8912 *z = (frame >> Z_SHIFT) & Z_MASK;
8913 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8914 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8915 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8918 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8924 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8926 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8927 & REMOTE_TX_RATE_MASK;
8928 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8931 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8935 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8936 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8939 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8943 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8944 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8947 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8949 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8952 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8954 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8957 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8963 if (dd->pport->host_link_state & HLS_UP) {
8964 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8967 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8968 & LINK_QUALITY_MASK;
8972 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8976 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8977 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8980 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8984 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8985 *ldr = (frame & 0xff);
8988 static int read_tx_settings(struct hfi1_devdata *dd,
8990 u8 *tx_polarity_inversion,
8991 u8 *rx_polarity_inversion,
8997 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8998 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8999 & ENABLE_LANE_TX_MASK;
9000 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
9001 & TX_POLARITY_INVERSION_MASK;
9002 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
9003 & RX_POLARITY_INVERSION_MASK;
9004 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
9008 static int write_tx_settings(struct hfi1_devdata *dd,
9010 u8 tx_polarity_inversion,
9011 u8 rx_polarity_inversion,
9016 /* no need to mask, all variable sizes match field widths */
9017 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9018 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9019 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9020 | max_rate << MAX_RATE_SHIFT;
9021 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9025 * Read an idle LCB message.
9027 * Returns 0 on success, -EINVAL on error
9029 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9033 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
9034 if (ret != HCMD_SUCCESS) {
9035 dd_dev_err(dd, "read idle message: type %d, err %d\n",
9039 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9040 /* return only the payload as we already know the type */
9041 *data_out >>= IDLE_PAYLOAD_SHIFT;
9046 * Read an idle SMA message. To be done in response to a notification from
9049 * Returns 0 on success, -EINVAL on error
9051 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9053 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9058 * Send an idle LCB message.
9060 * Returns 0 on success, -EINVAL on error
9062 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9066 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9067 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9068 if (ret != HCMD_SUCCESS) {
9069 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9077 * Send an idle SMA message.
9079 * Returns 0 on success, -EINVAL on error
9081 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9085 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9086 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9087 return send_idle_message(dd, data);
9091 * Initialize the LCB then do a quick link up. This may or may not be
9092 * in loopback.
9094 * return 0 on success, -errno on error
9096 static int do_quick_linkup(struct hfi1_devdata *dd)
9100 lcb_shutdown(dd, 0);
9103 /* LCB_CFG_LOOPBACK.VAL = 2 */
9104 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9105 write_csr(dd, DC_LCB_CFG_LOOPBACK,
9106 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9107 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9110 /* start the LCBs */
9111 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9112 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9114 /* simulator only loopback steps */
9115 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9116 /* LCB_CFG_RUN.EN = 1 */
9117 write_csr(dd, DC_LCB_CFG_RUN,
9118 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9120 ret = wait_link_transfer_active(dd, 10);
9124 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9125 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9130 * When doing quick linkup and not in loopback, both
9131 * sides must be done with LCB set-up before either
9132 * starts the quick linkup. Put a delay here so that
9133 * both sides can be started and have a chance to be
9134 * done with LCB set up before resuming.
9137 "Pausing for peer to be finished with LCB set up\n");
9139 dd_dev_err(dd, "Continuing with quick linkup\n");
9142 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9143 set_8051_lcb_access(dd);
9146 * State "quick" LinkUp request sets the physical link state to
9147 * LinkUp without a verify capability sequence.
9148 * This state is in simulator v37 and later.
9150 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9151 if (ret != HCMD_SUCCESS) {
9153 "%s: set physical link state to quick LinkUp failed with return %d\n",
9156 set_host_lcb_access(dd);
9157 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9164 return 0; /* success */
9168 * Set the SerDes to internal loopback mode.
9169 * Returns 0 on success, -errno on error.
9171 static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
9175 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
9176 if (ret == HCMD_SUCCESS)
9179 "Set physical link state to SerDes Loopback failed with return %d\n",
9187 * Do all special steps to set up loopback.
9189 static int init_loopback(struct hfi1_devdata *dd)
9191 dd_dev_info(dd, "Entering loopback mode\n");
9193 /* all loopbacks should disable self GUID check */
9194 write_csr(dd, DC_DC8051_CFG_MODE,
9195 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9198 * The simulator has only one loopback option - LCB. Switch
9199 * to that option, which includes quick link up.
9201 * Accept all valid loopback values.
9203 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9204 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9205 loopback == LOOPBACK_CABLE)) {
9206 loopback = LOOPBACK_LCB;
9211 /* handle serdes loopback */
9212 if (loopback == LOOPBACK_SERDES) {
9213 /* internal serdes loopback needs quick linkup on RTL */
9214 if (dd->icode == ICODE_RTL_SILICON)
9216 return set_serdes_loopback_mode(dd);
9219 /* LCB loopback - handled at poll time */
9220 if (loopback == LOOPBACK_LCB) {
9221 quick_linkup = 1; /* LCB is always quick linkup */
9223 /* not supported in emulation due to emulation RTL changes */
9224 if (dd->icode == ICODE_FPGA_EMULATION) {
9226 "LCB loopback not supported in emulation\n");
9232 /* external cable loopback requires no extra steps */
9233 if (loopback == LOOPBACK_CABLE)
9236 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9241 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9242 * used in the Verify Capability link width attribute.
9244 static u16 opa_to_vc_link_widths(u16 opa_widths)
9249 static const struct link_bits {
9252 } opa_link_xlate[] = {
9253 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9254 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9255 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9256 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
9259 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9260 if (opa_widths & opa_link_xlate[i].from)
9261 result |= opa_link_xlate[i].to;
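/*
 * Example: OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X maps to 0b1001
 * (VC bits 0 and 3) per the table above.
 */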
9267 * Set link attributes before moving to polling.
9269 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9271 struct hfi1_devdata *dd = ppd->dd;
9273 u8 tx_polarity_inversion;
9274 u8 rx_polarity_inversion;
9277 /* reset our fabric serdes to clear any lingering problems */
9278 fabric_serdes_reset(dd);
9280 /* set the local tx rate - need to read-modify-write */
9281 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9282 &rx_polarity_inversion, &ppd->local_tx_rate);
9284 goto set_local_link_attributes_fail;
9286 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9287 /* set the tx rate to the fastest enabled */
9288 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9289 ppd->local_tx_rate = 1;
9291 ppd->local_tx_rate = 0;
9293 /* set the tx rate to all enabled */
9294 ppd->local_tx_rate = 0;
9295 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9296 ppd->local_tx_rate |= 2;
9297 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9298 ppd->local_tx_rate |= 1;
9301 enable_lane_tx = 0xF; /* enable all four lanes */
9302 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9303 rx_polarity_inversion, ppd->local_tx_rate);
9304 if (ret != HCMD_SUCCESS)
9305 goto set_local_link_attributes_fail;
9308 * DC supports continuous updates.
9310 ret = write_vc_local_phy(dd,
9311 0 /* no power management */,
9312 1 /* continuous updates */);
9313 if (ret != HCMD_SUCCESS)
9314 goto set_local_link_attributes_fail;
9316 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9317 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9318 ppd->port_crc_mode_enabled);
9319 if (ret != HCMD_SUCCESS)
9320 goto set_local_link_attributes_fail;
9322 ret = write_vc_local_link_width(dd, 0, 0,
9323 opa_to_vc_link_widths(
9324 ppd->link_width_enabled));
9325 if (ret != HCMD_SUCCESS)
9326 goto set_local_link_attributes_fail;
9328 /* let peer know who we are */
9329 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9330 if (ret == HCMD_SUCCESS)
9333 set_local_link_attributes_fail:
9335 "Failed to set local link attributes, return 0x%x\n",
9341 * Call this to start the link.
9342 * Do not do anything if the link is disabled.
9343 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9345 int start_link(struct hfi1_pportdata *ppd)
9348 * Tune the SerDes to a ballpark setting for optimal signal and bit
9349 * error rate. Needs to be done before starting the link.
9353 if (!ppd->driver_link_ready) {
9354 dd_dev_info(ppd->dd,
9355 "%s: stopping link start because driver is not ready\n",
9361 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9362 * pkey table can be configured properly if the HFI unit is connected
9363 * to a switch port with MgmtAllowed=NO
9365 clear_full_mgmt_pkey(ppd);
9367 return set_link_state(ppd, HLS_DN_POLL);
9370 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9372 struct hfi1_devdata *dd = ppd->dd;
9374 unsigned long timeout;
9377 * Some QSFP cables have a quirk that asserts the IntN line as a side
9378 * effect of power up on plug-in. We ignore this false positive
9379 * interrupt until the module has finished powering up by waiting for
9380 * a minimum timeout of the module inrush initialization time of
9381 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9382 * module have stabilized.
9387 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9389 timeout = jiffies + msecs_to_jiffies(2000);
9391 mask = read_csr(dd, dd->hfi1_id ?
9392 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9393 if (!(mask & QSFP_HFI0_INT_N))
9395 if (time_after(jiffies, timeout)) {
9396 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9404 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9406 struct hfi1_devdata *dd = ppd->dd;
9409 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9412 * Clear the status register to avoid an immediate interrupt
9413 * when we re-enable the IntN pin
9415 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9417 mask |= (u64)QSFP_HFI0_INT_N;
9419 mask &= ~(u64)QSFP_HFI0_INT_N;
9421 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9424 int reset_qsfp(struct hfi1_pportdata *ppd)
9426 struct hfi1_devdata *dd = ppd->dd;
9427 u64 mask, qsfp_mask;
9429 /* Disable INT_N from triggering QSFP interrupts */
9430 set_qsfp_int_n(ppd, 0);
9432 /* Reset the QSFP */
9433 mask = (u64)QSFP_HFI0_RESET_N;
9435 qsfp_mask = read_csr(dd,
9436 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9439 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9445 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9447 wait_for_qsfp_init(ppd);
9450 * Allow INT_N to trigger the QSFP interrupt to watch
9451 * for alarms and warnings
9453 set_qsfp_int_n(ppd, 1);
9456 * After the reset, AOC transmitters are enabled by default. They need
9457 * to be turned off to complete the QSFP setup before they can be
9460 return set_qsfp_tx(ppd, 0);
9463 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9464 u8 *qsfp_interrupt_status)
9466 struct hfi1_devdata *dd = ppd->dd;
9468 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9469 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9470 dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9473 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9474 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9475 dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9479 * The remaining alarms/warnings don't matter if the link is down.
9481 if (ppd->host_link_state & HLS_DOWN)
9484 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9485 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9486 dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9489 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9490 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9491 dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9494 /* Byte 2 is vendor specific */
9496 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9497 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9498 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9501 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9502 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9503 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9506 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9507 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9508 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9511 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9512 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9513 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9516 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9517 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9518 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9521 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9522 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9523 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9526 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9527 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9528 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9531 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9532 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9533 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9536 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9537 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9538 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9541 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9542 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9543 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9546 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9547 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9548 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9551 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9552 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9553 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9556 /* Bytes 9-10 and 11-12 are reserved */
9557 /* Bytes 13-15 are vendor specific */
9562 /* This routine is only scheduled if the QSFP module-present signal is asserted */
9563 void qsfp_event(struct work_struct *work)
9565 struct qsfp_data *qd;
9566 struct hfi1_pportdata *ppd;
9567 struct hfi1_devdata *dd;
9569 qd = container_of(work, struct qsfp_data, qsfp_work);
9574 if (!qsfp_mod_present(ppd))
9577 if (ppd->host_link_state == HLS_DN_DISABLE) {
9578 dd_dev_info(ppd->dd,
9579 "%s: stopping link start because link is disabled\n",
9585 * Turn DC back on after cable has been re-inserted. Up until
9586 * now, the DC has been in reset to save power.
9590 if (qd->cache_refresh_required) {
9591 set_qsfp_int_n(ppd, 0);
9593 wait_for_qsfp_init(ppd);
9596 * Allow INT_N to trigger the QSFP interrupt to watch
9597 * for alarms and warnings
9599 set_qsfp_int_n(ppd, 1);
9604 if (qd->check_interrupt_flags) {
9605 u8 qsfp_interrupt_status[16] = {0,};
9607 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9608 &qsfp_interrupt_status[0], 16) != 16) {
9610 "%s: Failed to read status of QSFP module\n",
9613 unsigned long flags;
9615 handle_qsfp_error_conditions(
9616 ppd, qsfp_interrupt_status);
9617 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9618 ppd->qsfp_info.check_interrupt_flags = 0;
9619 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9625 static void init_qsfp_int(struct hfi1_devdata *dd)
9627 struct hfi1_pportdata *ppd = dd->pport;
9628 u64 qsfp_mask, cce_int_mask;
9629 const int qsfp1_int_smask = QSFP1_INT % 64;
9630 const int qsfp2_int_smask = QSFP2_INT % 64;
9633 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9634 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9635 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9636 * the index of the appropriate CSR in the CCEIntMask CSR array
9638 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9639 (8 * (QSFP1_INT / 64)));
9641 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9642 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9645 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9646 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9650 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9651 /* Clear current status to avoid spurious interrupts */
9652 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9654 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9657 set_qsfp_int_n(ppd, 0);
9659 /* Handle active low nature of INT_N and MODPRST_N pins */
9660 if (qsfp_mod_present(ppd))
9661 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9663 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9668 * Do a one-time initialize of the LCB block.
9670 static void init_lcb(struct hfi1_devdata *dd)
9672 /* simulator does not correctly handle LCB cclk loopback, skip */
9673 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9676 /* the DC has been reset earlier in the driver load */
9678 /* set LCB for cclk loopback on the port */
9679 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9680 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9681 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9682 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9683 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9684 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9685 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9689 * Perform a test read on the QSFP. Return 0 on success, -ERRNO
9690 * on error.
9692 static int test_qsfp_read(struct hfi1_pportdata *ppd)
9698 * Report success if this is not a QSFP port or, if it is a QSFP, the
9699 * cable is unplugged.
9701 if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
9704 /* read byte 2, the status byte */
9705 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9711 return 0; /* success */
9715 * Values for QSFP retry.
9717 * Give up after 10s (20 x 500ms). The overall timeout was determined
9718 * empirically on a large cluster.
9720 #define MAX_QSFP_RETRIES 20
9721 #define QSFP_RETRY_WAIT 500 /* msec */
9724 * Try a QSFP read. If it fails, schedule a retry for later.
9725 * Called on first link activation after driver load.
9727 static void try_start_link(struct hfi1_pportdata *ppd)
9729 if (test_qsfp_read(ppd)) {
9731 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9732 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9735 dd_dev_info(ppd->dd,
9736 "QSFP not responding, waiting and retrying %d\n",
9737 (int)ppd->qsfp_retry_count);
9738 ppd->qsfp_retry_count++;
9739 queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
9740 msecs_to_jiffies(QSFP_RETRY_WAIT));
9743 ppd->qsfp_retry_count = 0;
9749 * Workqueue function to start the link after a delay.
9751 void handle_start_link(struct work_struct *work)
9753 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9754 start_link_work.work);
9755 try_start_link(ppd);
9758 int bringup_serdes(struct hfi1_pportdata *ppd)
9760 struct hfi1_devdata *dd = ppd->dd;
9764 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9765 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9767 guid = ppd->guids[HFI1_PORT_GUID_INDEX];
9770 guid = dd->base_guid + ppd->port - 1;
9771 ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
9774 /* Set linkinit_reason on power up per OPA spec */
9775 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9777 /* one-time init of the LCB */
9781 ret = init_loopback(dd);
9787 if (ppd->port_type == PORT_TYPE_QSFP) {
9788 set_qsfp_int_n(ppd, 0);
9789 wait_for_qsfp_init(ppd);
9790 set_qsfp_int_n(ppd, 1);
9793 try_start_link(ppd);
9797 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9799 struct hfi1_devdata *dd = ppd->dd;
9802 * Shut down the link and keep it down. First clear the flag that says
9803 * the driver wants to allow the link to be up (driver_link_ready).
9804 * Then make sure the link is not automatically restarted
9805 * (link_enabled). Cancel any pending restart. Finally,
9806 * go offline.
9808 ppd->driver_link_ready = 0;
9809 ppd->link_enabled = 0;
9811 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9812 flush_delayed_work(&ppd->start_link_work);
9813 cancel_delayed_work_sync(&ppd->start_link_work);
9815 ppd->offline_disabled_reason =
9816 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
9817 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9818 OPA_LINKDOWN_REASON_SMA_DISABLED);
9819 set_link_state(ppd, HLS_DN_OFFLINE);
9821 /* disable the port */
9822 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9825 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9827 struct hfi1_pportdata *ppd;
9830 ppd = (struct hfi1_pportdata *)(dd + 1);
9831 for (i = 0; i < dd->num_pports; i++, ppd++) {
9832 ppd->ibport_data.rvp.rc_acks = NULL;
9833 ppd->ibport_data.rvp.rc_qacks = NULL;
9834 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9835 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9836 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9837 if (!ppd->ibport_data.rvp.rc_acks ||
9838 !ppd->ibport_data.rvp.rc_delayed_comp ||
9839 !ppd->ibport_data.rvp.rc_qacks)
9847 * index is the index into the receive array
9849 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9850 u32 type, unsigned long pa, u16 order)
9854 if (!(dd->flags & HFI1_PRESENT))
9857 if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
9860 } else if (type > PT_INVALID) {
9862 "unexpected receive array type %u for index %u, not handled\n",
9866 trace_hfi1_put_tid(dd, index, type, pa, order);
9868 #define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9869 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9870 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9871 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9872 << RCV_ARRAY_RT_ADDR_SHIFT;
9873 trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9874 writeq(reg, dd->rcvarray_wc + (index * 8));
9876 if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
9878 * Eager entries are written and flushed
9880 * Expected entries are flushed every 4 writes
9887 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9889 struct hfi1_devdata *dd = rcd->dd;
9892 /* this could be optimized */
9893 for (i = rcd->eager_base; i < rcd->eager_base +
9894 rcd->egrbufs.alloced; i++)
9895 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9897 for (i = rcd->expected_base;
9898 i < rcd->expected_base + rcd->expected_count; i++)
9899 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9902 static const char * const ib_cfg_name_strings[] = {
9903 "HFI1_IB_CFG_LIDLMC",
9904 "HFI1_IB_CFG_LWID_DG_ENB",
9905 "HFI1_IB_CFG_LWID_ENB",
9907 "HFI1_IB_CFG_SPD_ENB",
9909 "HFI1_IB_CFG_RXPOL_ENB",
9910 "HFI1_IB_CFG_LREV_ENB",
9911 "HFI1_IB_CFG_LINKLATENCY",
9912 "HFI1_IB_CFG_HRTBT",
9913 "HFI1_IB_CFG_OP_VLS",
9914 "HFI1_IB_CFG_VL_HIGH_CAP",
9915 "HFI1_IB_CFG_VL_LOW_CAP",
9916 "HFI1_IB_CFG_OVERRUN_THRESH",
9917 "HFI1_IB_CFG_PHYERR_THRESH",
9918 "HFI1_IB_CFG_LINKDEFAULT",
9919 "HFI1_IB_CFG_PKEYS",
9921 "HFI1_IB_CFG_LSTATE",
9922 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9923 "HFI1_IB_CFG_PMA_TICKS",
9927 static const char *ib_cfg_name(int which)
9929 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9931 return ib_cfg_name_strings[which];
9934 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9936 struct hfi1_devdata *dd = ppd->dd;
9940 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9941 val = ppd->link_width_enabled;
9943 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9944 val = ppd->link_width_active;
9946 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9947 val = ppd->link_speed_enabled;
9949 case HFI1_IB_CFG_SPD: /* current Link speed */
9950 val = ppd->link_speed_active;
9953 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9954 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9955 case HFI1_IB_CFG_LINKLATENCY:
9958 case HFI1_IB_CFG_OP_VLS:
9959 val = ppd->actual_vls_operational;
9961 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9962 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9964 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9965 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9967 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9968 val = ppd->overrun_threshold;
9970 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9971 val = ppd->phy_error_threshold;
9973 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9974 val = dd->link_default;
9977 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9978 case HFI1_IB_CFG_PMA_TICKS:
9981 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9984 "%s: which %s: not implemented\n",
9986 ib_cfg_name(which));
9994 * The largest MAD packet size.
9996 #define MAX_MAD_PACKET 2048
9999 * Return the maximum header bytes that can go on the _wire_
10000 * for this device. This count includes the ICRC which is
10001 * not part of the packet held in memory but is appended
10002 * by the hardware to the end of the packet.
10003 * This is dependent on the device's receive header entry size.
10004 * HFI allows this to be set per-receive context, but the
10005 * driver presently enforces a global value.
10007 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
10010 * The maximum non-payload (MTU) bytes in LRH.PktLen are
10011 * the Receive Header Entry Size minus the PBC (or RHF) size
10012 * plus one DW for the ICRC appended by HW.
10014 * dd->rcd[0].rcvhdrqentsize is in DW.
10015 * We use rcd[0] as all contexts will have the same value. Also,
10016 * the first kernel context would have been allocated by now so
10017 * we are guaranteed a valid value.
10019 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
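/*
 * Editor's worked example (illustrative, not from the original source):
 * assuming a receive header entry size of 32 DW, the maximum wire header
 * is (32 - 2 + 1) << 2 = 124 bytes, i.e. 31 DW: 30 DW of header after
 * removing the 2-DW PBC (or RHF), plus 1 DW for the HW-appended ICRC.
 */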
10024 * @ppd - per port data
10026 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
10027 * registers compare against LRH.PktLen, so use the max bytes included
10028 * in the LRH.
10030 * This routine changes all VL values except VL15, which it maintains at
10031 * the same value.
10033 static void set_send_length(struct hfi1_pportdata *ppd)
10035 struct hfi1_devdata *dd = ppd->dd;
10036 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10037 u32 maxvlmtu = dd->vld[15].mtu;
10038 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10039 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10040 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
10044 for (i = 0; i < ppd->vls_supported; i++) {
10045 if (dd->vld[i].mtu > maxvlmtu)
10046 maxvlmtu = dd->vld[i].mtu;
10048 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10049 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10050 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10052 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10053 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10054 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10056 write_csr(dd, SEND_LEN_CHECK0, len1);
10057 write_csr(dd, SEND_LEN_CHECK1, len2);
10058 /* adjust kernel credit return thresholds based on new MTUs */
10059 /* all kernel receive contexts have the same hdrqentsize */
10060 for (i = 0; i < ppd->vls_supported; i++) {
10061 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10062 sc_mtu_to_threshold(dd->vld[i].sc,
10064 dd->rcd[0]->rcvhdrqentsize));
10065 for (j = 0; j < INIT_SC_PER_VL; j++)
10066 sc_set_cr_threshold(
10067 pio_select_send_context_vl(dd, j, i),
10070 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10071 sc_mtu_to_threshold(dd->vld[15].sc,
10073 dd->rcd[0]->rcvhdrqentsize));
10074 sc_set_cr_threshold(dd->vld[15].sc, thres);
10076 /* Adjust maximum MTU for the port in DC */
10077 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10078 (ilog2(maxvlmtu >> 8) + 1);
10079 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10080 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10081 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10082 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10083 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
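/*
 * Editor's worked example (illustrative values): with an 8192-byte MTU on
 * a VL and max_hb = 124 header bytes, the packed length-check value is
 * (8192 + 124) >> 2 = 2079 DW, masked and shifted into that VL's field of
 * SendLenCheck0/SendLenCheck1. A 10240-byte maxvlmtu selects the dedicated
 * DCC_CFG_PORT_MTU_CAP_10240 encoding; smaller power-of-two MTUs use
 * ilog2(maxvlmtu >> 8) + 1, e.g. 8192 -> ilog2(32) + 1 = 6.
 */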
10086 static void set_lidlmc(struct hfi1_pportdata *ppd)
10090 struct hfi1_devdata *dd = ppd->dd;
10091 u32 mask = ~((1U << ppd->lmc) - 1);
10092 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10096 * Program 0 in CSR if port lid is extended. This prevents
10097 * 9B packets being sent out for large lids.
10099 lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
10100 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10101 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10102 c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
10103 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
10104 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10105 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10106 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10109 * Iterate over all the send contexts and set their SLID check
10111 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10112 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10113 (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10114 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10116 for (i = 0; i < dd->chip_send_contexts; i++) {
10117 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10119 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10122 /* Now we have to do the same thing for the sdma engines */
10123 sdma_update_lmc(dd, mask, lid);
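/*
 * Editor's worked example (illustrative values): with lmc = 2 the mask is
 * ~((1U << 2) - 1) = 0xFFFFFFFC. For lid = 0x1000 the SLID check value is
 * lid & mask = 0x1000, so the four LIDs 0x1000-0x1003 all pass the send
 * context and SDMA SLID checks.
 */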
10126 static const char *state_completed_string(u32 completed)
10128 static const char * const state_completed[] = {
10134 if (completed < ARRAY_SIZE(state_completed))
10135 return state_completed[completed];
10140 static const char all_lanes_dead_timeout_expired[] =
10141 "All lanes were inactive – was the interconnect media removed?";
10142 static const char tx_out_of_policy[] =
10143 "Passing lanes on local port do not meet the local link width policy";
10144 static const char no_state_complete[] =
10145 "State timeout occurred before link partner completed the state";
10146 static const char * const state_complete_reasons[] = {
10147 [0x00] = "Reason unknown",
10148 [0x01] = "Link was halted by driver, refer to LinkDownReason",
10149 [0x02] = "Link partner reported failure",
10150 [0x10] = "Unable to achieve frame sync on any lane",
10152 "Unable to find a common bit rate with the link partner",
10154 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10156 "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10157 [0x14] = no_state_complete,
10159 "State timeout occurred before link partner identified equalization presets",
10161 "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10162 [0x17] = tx_out_of_policy,
10163 [0x20] = all_lanes_dead_timeout_expired,
10165 "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10166 [0x22] = no_state_complete,
10168 "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10169 [0x24] = tx_out_of_policy,
10170 [0x30] = all_lanes_dead_timeout_expired,
10172 "State timeout occurred waiting for host to process received frames",
10173 [0x32] = no_state_complete,
10175 "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10176 [0x34] = tx_out_of_policy,
10179 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10182 const char *str = NULL;
10184 if (code < ARRAY_SIZE(state_complete_reasons))
10185 str = state_complete_reasons[code];
10192 /* describe the given last state complete frame */
10193 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10194 const char *prefix)
10196 struct hfi1_devdata *dd = ppd->dd;
10204 * [ 0: 0] - success
10205 * [ 3: 1] - state
10206 * [ 7: 4] - next state timeout
10207 * [15: 8] - reason code
10208 * [31:16] - lanes
10210 success = frame & 0x1;
10211 state = (frame >> 1) & 0x7;
10212 reason = (frame >> 8) & 0xff;
10213 lanes = (frame >> 16) & 0xffff;
10215 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10217 dd_dev_err(dd, " last reported state: %s (0x%x)\n",
10218 state_completed_string(state), state);
10219 dd_dev_err(dd, " state successfully completed: %s\n",
10220 success ? "yes" : "no");
10221 dd_dev_err(dd, " fail reason 0x%x: %s\n",
10222 reason, state_complete_reason_code_string(ppd, reason));
10223 dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
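/*
 * Editor's worked example (illustrative frame value): decoding
 * frame = 0x00031707 yields
 *   success = 0x00031707 & 0x1            = 1
 *   state   = (0x00031707 >> 1) & 0x7     = 3
 *   reason  = (0x00031707 >> 8) & 0xff    = 0x17 (tx_out_of_policy)
 *   lanes   = (0x00031707 >> 16) & 0xffff = 0x0003 (lanes 0 and 1)
 */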
10227 * Read the last state complete frames and explain them. This routine
10228 * expects to be called if the link went down during link negotiation
10229 * and initialization (LNI). That is, anywhere between polling and link up.
10231 static void check_lni_states(struct hfi1_pportdata *ppd)
10233 u32 last_local_state;
10234 u32 last_remote_state;
10236 read_last_local_state(ppd->dd, &last_local_state);
10237 read_last_remote_state(ppd->dd, &last_remote_state);
10240 * Don't report anything if there is nothing to report. A value of
10241 * 0 means the link was taken down while polling and there was no
10242 * training in-process.
10244 if (last_local_state == 0 && last_remote_state == 0)
10247 decode_state_complete(ppd, last_local_state, "transmitted");
10248 decode_state_complete(ppd, last_remote_state, "received");
10251 /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10252 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10255 unsigned long timeout;
10257 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10258 timeout = jiffies + msecs_to_jiffies(wait_ms);
10260 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10263 if (time_after(jiffies, timeout)) {
10265 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10273 /* called when the logical link state is not down as it should be */
10274 static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10276 struct hfi1_devdata *dd = ppd->dd;
10279 * Bring link up in LCB loopback
10281 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10282 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10283 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10285 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10286 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10287 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10288 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10290 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10291 (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10293 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10294 write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10296 wait_link_transfer_active(dd, 100);
10299 * Bring the link down again.
10301 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10302 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10303 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10305 /* adjust ppd->statusp, if needed */
10306 update_statusp(ppd, IB_PORT_DOWN);
10308 dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
10312 * Helper for set_link_state(). Do not call except from that routine.
10313 * Expects ppd->hls_mutex to be held.
10315 * @rem_reason value to be sent to the neighbor
10317 * LinkDownReasons only set if transition succeeds.
10319 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10321 struct hfi1_devdata *dd = ppd->dd;
10322 u32 previous_state;
10323 int offline_state_ret;
10326 update_lcb_cache(dd);
10328 previous_state = ppd->host_link_state;
10329 ppd->host_link_state = HLS_GOING_OFFLINE;
10331 /* start offline transition */
10332 ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
10334 if (ret != HCMD_SUCCESS) {
10336 "Failed to transition to Offline link state, return %d\n",
10340 if (ppd->offline_disabled_reason ==
10341 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10342 ppd->offline_disabled_reason =
10343 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10345 offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10346 if (offline_state_ret < 0)
10347 return offline_state_ret;
10349 /* Disabling AOC transmitters */
10350 if (ppd->port_type == PORT_TYPE_QSFP &&
10351 ppd->qsfp_info.limiting_active &&
10352 qsfp_mod_present(ppd)) {
10355 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10357 set_qsfp_tx(ppd, 0);
10358 release_chip_resource(dd, qsfp_resource(dd));
10360 /* not fatal, but should warn */
10362 "Unable to acquire lock to turn off QSFP TX\n");
10367 * Wait for the offline.Quiet transition if it hasn't happened yet. It
10368 * can take a while for the link to go down.
10370 if (offline_state_ret != PLS_OFFLINE_QUIET) {
10371 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10377 * Now in charge of LCB - must be after the physical state is
10378 * offline.quiet and before host_link_state is changed.
10380 set_host_lcb_access(dd);
10381 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10383 /* make sure the logical state is also down */
10384 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10386 force_logical_link_state_down(ppd);
10388 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10391 * The LNI has a mandatory wait time after the physical state
10392 * moves to Offline.Quiet. The wait time may be different
10393 * depending on how the link went down. The 8051 firmware
10394 * will observe the needed wait time and only move to ready
10395 * when that is completed. The largest of the quiet timeouts
10396 * is 6s, so wait that long and then at least 0.5s more for
10397 * other transitions, and another 0.5s for a buffer.
10399 ret = wait_fm_ready(dd, 7000);
10402 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10403 /* state is really offline, so make it so */
10404 ppd->host_link_state = HLS_DN_OFFLINE;
10409 * The state is now offline and the 8051 is ready to accept host
10410 * requests.
10411 * - change our state
10412 * - notify others if we were previously in a linkup state
10414 ppd->host_link_state = HLS_DN_OFFLINE;
10415 if (previous_state & HLS_UP) {
10416 /* went down while link was up */
10417 handle_linkup_change(dd, 0);
10418 } else if (previous_state
10419 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10420 /* went down while attempting link up */
10421 check_lni_states(ppd);
10423 /* The QSFP doesn't need to be reset on LNI failure */
10424 ppd->qsfp_info.reset_needed = 0;
10427 /* the active link width (downgrade) is 0 on link down */
10428 ppd->link_width_active = 0;
10429 ppd->link_width_downgrade_tx_active = 0;
10430 ppd->link_width_downgrade_rx_active = 0;
10431 ppd->current_egress_rate = 0;
10435 /* return the link state name */
10436 static const char *link_state_name(u32 state)
10439 int n = ilog2(state);
10440 static const char * const names[] = {
10441 [__HLS_UP_INIT_BP] = "INIT",
10442 [__HLS_UP_ARMED_BP] = "ARMED",
10443 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10444 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10445 [__HLS_DN_POLL_BP] = "POLL",
10446 [__HLS_DN_DISABLE_BP] = "DISABLE",
10447 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10448 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10449 [__HLS_GOING_UP_BP] = "GOING_UP",
10450 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10451 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10454 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10455 return name ? name : "unknown";
10458 /* return the link state reason name */
10459 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10461 if (state == HLS_UP_INIT) {
10462 switch (ppd->linkinit_reason) {
10463 case OPA_LINKINIT_REASON_LINKUP:
10465 case OPA_LINKINIT_REASON_FLAPPING:
10466 return "(FLAPPING)";
10467 case OPA_LINKINIT_OUTSIDE_POLICY:
10468 return "(OUTSIDE_POLICY)";
10469 case OPA_LINKINIT_QUARANTINED:
10470 return "(QUARANTINED)";
10471 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10472 return "(INSUFIC_CAPABILITY)";
10481 * driver_pstate - convert the driver's notion of a port's
10482 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10483 * Return -1 (converted to a u32) to indicate error.
10485 u32 driver_pstate(struct hfi1_pportdata *ppd)
10487 switch (ppd->host_link_state) {
10490 case HLS_UP_ACTIVE:
10491 return IB_PORTPHYSSTATE_LINKUP;
10493 return IB_PORTPHYSSTATE_POLLING;
10494 case HLS_DN_DISABLE:
10495 return IB_PORTPHYSSTATE_DISABLED;
10496 case HLS_DN_OFFLINE:
10497 return OPA_PORTPHYSSTATE_OFFLINE;
10498 case HLS_VERIFY_CAP:
10499 return IB_PORTPHYSSTATE_POLLING;
10501 return IB_PORTPHYSSTATE_POLLING;
10502 case HLS_GOING_OFFLINE:
10503 return OPA_PORTPHYSSTATE_OFFLINE;
10504 case HLS_LINK_COOLDOWN:
10505 return OPA_PORTPHYSSTATE_OFFLINE;
10506 case HLS_DN_DOWNDEF:
10508 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10509 ppd->host_link_state);
10515 * driver_lstate - convert the driver's notion of a port's
10516 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10517 * (converted to a u32) to indicate error.
10519 u32 driver_lstate(struct hfi1_pportdata *ppd)
10521 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10522 return IB_PORT_DOWN;
10524 switch (ppd->host_link_state & HLS_UP) {
10526 return IB_PORT_INIT;
10528 return IB_PORT_ARMED;
10529 case HLS_UP_ACTIVE:
10530 return IB_PORT_ACTIVE;
10532 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10533 ppd->host_link_state);
10538 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10539 u8 neigh_reason, u8 rem_reason)
10541 if (ppd->local_link_down_reason.latest == 0 &&
10542 ppd->neigh_link_down_reason.latest == 0) {
10543 ppd->local_link_down_reason.latest = lcl_reason;
10544 ppd->neigh_link_down_reason.latest = neigh_reason;
10545 ppd->remote_link_down_reason = rem_reason;
10550 * Verify if BCT for data VLs is non-zero.
10552 static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10554 return !!ppd->actual_vls_operational;
10558 * Change the physical and/or logical link state.
10560 * Do not call this routine while inside an interrupt. It contains
10561 * calls to routines that can take multiple seconds to finish.
10563 * Returns 0 on success, -errno on failure.
10565 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10567 struct hfi1_devdata *dd = ppd->dd;
10568 struct ib_event event = {.device = NULL};
10570 int orig_new_state, poll_bounce;
10572 mutex_lock(&ppd->hls_lock);
10574 orig_new_state = state;
10575 if (state == HLS_DN_DOWNDEF)
10576 state = dd->link_default;
10578 /* interpret poll -> poll as a link bounce */
10579 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10580 state == HLS_DN_POLL;
10582 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10583 link_state_name(ppd->host_link_state),
10584 link_state_name(orig_new_state),
10585 poll_bounce ? "(bounce) " : "",
10586 link_state_reason_name(ppd, state));
10589 * If we're going to a (HLS_*) link state that implies the logical
10590 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10591 * reset is_sm_config_started to 0.
10593 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10594 ppd->is_sm_config_started = 0;
10597 * Do nothing if the states match. Let a poll-to-poll link bounce
10598 * go through.
10600 if (ppd->host_link_state == state && !poll_bounce)
10605 if (ppd->host_link_state == HLS_DN_POLL &&
10606 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10608 * Quick link up jumps from polling to here.
10610 * Whether in normal or loopback mode, the
10611 * simulator jumps from polling to link up.
10612 * Accept that here.
10615 } else if (ppd->host_link_state != HLS_GOING_UP) {
10620 * Wait for Link_Up physical state.
10621 * Physical and logical states should already have
10622 * transitioned to LinkUp and LinkInit respectively.
10624 ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10627 "%s: physical state did not change to LINK-UP\n",
10632 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10635 "%s: logical state did not change to INIT\n",
10640 /* clear old transient LINKINIT_REASON code */
10641 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10642 ppd->linkinit_reason =
10643 OPA_LINKINIT_REASON_LINKUP;
10645 /* enable the port */
10646 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10648 handle_linkup_change(dd, 1);
10649 ppd->host_link_state = HLS_UP_INIT;
10652 if (ppd->host_link_state != HLS_UP_INIT)
10655 if (!data_vls_operational(ppd)) {
10657 "%s: data VLs not operational\n", __func__);
10662 set_logical_state(dd, LSTATE_ARMED);
10663 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10666 "%s: logical state did not change to ARMED\n",
10670 ppd->host_link_state = HLS_UP_ARMED;
10672 * The simulator does not currently implement SMA messages,
10673 * so neighbor_normal is not set. Set it here when we first
10674 * move to Armed.
10676 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10677 ppd->neighbor_normal = 1;
10679 case HLS_UP_ACTIVE:
10680 if (ppd->host_link_state != HLS_UP_ARMED)
10683 set_logical_state(dd, LSTATE_ACTIVE);
10684 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10687 "%s: logical state did not change to ACTIVE\n",
10690 /* tell all engines to go running */
10691 sdma_all_running(dd);
10692 ppd->host_link_state = HLS_UP_ACTIVE;
10694 /* Signal the IB layer that the port has gone active */
10695 event.device = &dd->verbs_dev.rdi.ibdev;
10696 event.element.port_num = ppd->port;
10697 event.event = IB_EVENT_PORT_ACTIVE;
10701 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10702 ppd->host_link_state == HLS_DN_OFFLINE) &&
10705 /* Hand LED control to the DC */
10706 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10708 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10709 u8 tmp = ppd->link_enabled;
10711 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10713 ppd->link_enabled = tmp;
10716 ppd->remote_link_down_reason = 0;
10718 if (ppd->driver_link_ready)
10719 ppd->link_enabled = 1;
10722 set_all_slowpath(ppd->dd);
10723 ret = set_local_link_attributes(ppd);
10727 ppd->port_error_action = 0;
10728 ppd->host_link_state = HLS_DN_POLL;
10730 if (quick_linkup) {
10731 /* quick linkup does not go into polling */
10732 ret = do_quick_linkup(dd);
10734 ret1 = set_physical_link_state(dd, PLS_POLLING);
10735 if (ret1 != HCMD_SUCCESS) {
10737 "Failed to transition to Polling link state, return 0x%x\n",
10742 ppd->offline_disabled_reason =
10743 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10745 * If an error occurred above, go back to offline. The
10746 * caller may reschedule another attempt.
10749 goto_offline(ppd, 0);
10751 log_physical_state(ppd, PLS_POLLING);
10753 case HLS_DN_DISABLE:
10754 /* link is disabled */
10755 ppd->link_enabled = 0;
10757 /* allow any state to transition to disabled */
10759 /* must transition to offline first */
10760 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10761 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10764 ppd->remote_link_down_reason = 0;
10767 if (!dd->dc_shutdown) {
10768 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10769 if (ret1 != HCMD_SUCCESS) {
10771 "Failed to transition to Disabled link state, return 0x%x\n",
10776 ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10779 "%s: physical state did not change to DISABLED\n",
10785 ppd->host_link_state = HLS_DN_DISABLE;
10787 case HLS_DN_OFFLINE:
10788 if (ppd->host_link_state == HLS_DN_DISABLE)
10791 /* allow any state to transition to offline */
10792 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10794 ppd->remote_link_down_reason = 0;
10796 case HLS_VERIFY_CAP:
10797 if (ppd->host_link_state != HLS_DN_POLL)
10799 ppd->host_link_state = HLS_VERIFY_CAP;
10800 log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
10803 if (ppd->host_link_state != HLS_VERIFY_CAP)
10806 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10807 if (ret1 != HCMD_SUCCESS) {
10809 "Failed to transition to link up state, return 0x%x\n",
10814 ppd->host_link_state = HLS_GOING_UP;
10817 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10818 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10820 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10829 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10830 __func__, link_state_name(ppd->host_link_state),
10831 link_state_name(state));
10835 mutex_unlock(&ppd->hls_lock);
10838 ib_dispatch_event(&event);
10843 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10849 case HFI1_IB_CFG_LIDLMC:
10852 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10854 * The VL Arbitrator high limit is sent in units of 4k
10855 * bytes, while HFI stores it in units of 64 bytes.
10858 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10859 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10860 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10862 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10863 /* HFI only supports POLL as the default link down state */
10864 if (val != HLS_DN_POLL)
10867 case HFI1_IB_CFG_OP_VLS:
10868 if (ppd->vls_operational != val) {
10869 ppd->vls_operational = val;
10875 * For link width, link width downgrade, and speed enable, always AND
10876 * the setting with what is actually supported. This has two benefits.
10877 * First, enabled can't have unsupported values, no matter what the
10878 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10879 * "fill in with your supported value" have all the bits in the
10880 * field set, so simply ANDing with supported has the desired result.
10882 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10883 ppd->link_width_enabled = val & ppd->link_width_supported;
10885 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10886 ppd->link_width_downgrade_enabled =
10887 val & ppd->link_width_downgrade_supported;
10889 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10890 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10892 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10894 * HFI does not follow IB specs, save this value
10895 * so we can report it, if asked.
10897 ppd->overrun_threshold = val;
10899 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10901 * HFI does not follow IB specs, save this value
10902 * so we can report it, if asked.
10904 ppd->phy_error_threshold = val;
10907 case HFI1_IB_CFG_MTU:
10908 set_send_length(ppd);
10911 case HFI1_IB_CFG_PKEYS:
10912 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10913 set_partition_keys(ppd);
10917 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10918 dd_dev_info(ppd->dd,
10919 "%s: which %s, val 0x%x: not implemented\n",
10920 __func__, ib_cfg_name(which), val);
10926 /* begin functions related to vl arbitration table caching */
10927 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10931 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10932 VL_ARB_LOW_PRIO_TABLE_SIZE);
10933 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10934 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10937 * Note that we always return values directly from the
10938 * 'vl_arb_cache' (and do no CSR reads) in response to a
10939 * 'Get(VLArbTable)'. This is obviously correct after a
10940 * 'Set(VLArbTable)', since the cache will then be up to
10941 * date. But it's also correct prior to any 'Set(VLArbTable)'
10942 * since then both the cache and the relevant h/w registers
10943 * will be "clean" (zero).
10946 for (i = 0; i < MAX_PRIO_TABLE; i++)
10947 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10951 * vl_arb_lock_cache
10953 * All other vl_arb_* functions should be called only after locking
10954 * the cache.
10956 static inline struct vl_arb_cache *
10957 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10959 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10961 spin_lock(&ppd->vl_arb_cache[idx].lock);
10962 return &ppd->vl_arb_cache[idx];
10965 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10967 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10970 static void vl_arb_get_cache(struct vl_arb_cache *cache,
10971 struct ib_vl_weight_elem *vl)
10973 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10976 static void vl_arb_set_cache(struct vl_arb_cache *cache,
10977 struct ib_vl_weight_elem *vl)
10979 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10982 static int vl_arb_match_cache(struct vl_arb_cache *cache,
10983 struct ib_vl_weight_elem *vl)
10985 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10988 /* end functions related to vl arbitration table caching */
10990 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10991 u32 size, struct ib_vl_weight_elem *vl)
10993 struct hfi1_devdata *dd = ppd->dd;
10995 unsigned int i, is_up = 0;
10996 int drain, ret = 0;
10998 mutex_lock(&ppd->hls_lock);
11000 if (ppd->host_link_state & HLS_UP)
11003 drain = !is_ax(dd) && is_up;
11007 * Before adjusting VL arbitration weights, empty per-VL
11008 * FIFOs, otherwise a packet whose VL weight is being
11009 * set to 0 could get stuck in a FIFO with no chance to
11010 * egress.
11012 ret = stop_drain_data_vls(dd);
11017 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
11022 for (i = 0; i < size; i++, vl++) {
11024 * NOTE: The low priority shift and mask are used here, but
11025 * they are the same for both the low and high registers.
11027 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
11028 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
11029 | (((u64)vl->weight
11030 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
11031 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
11032 write_csr(dd, target + (i * 8), reg);
11034 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
11037 open_fill_data_vls(dd); /* reopen all VLs */
11040 mutex_unlock(&ppd->hls_lock);
11046 * Read one credit merge VL register.
11048 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
11049 struct vl_limit *vll)
11051 u64 reg = read_csr(dd, csr);
11053 vll->dedicated = cpu_to_be16(
11054 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11055 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11056 vll->shared = cpu_to_be16(
11057 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11058 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11062 * Read the current credit merge limits.
11064 static int get_buffer_control(struct hfi1_devdata *dd,
11065 struct buffer_control *bc, u16 *overall_limit)
11070 /* not all entries are filled in */
11071 memset(bc, 0, sizeof(*bc));
11073 /* OPA and HFI have a 1-1 mapping */
11074 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11075 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11077 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11078 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11080 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11081 bc->overall_shared_limit = cpu_to_be16(
11082 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11083 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11085 *overall_limit = (reg
11086 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11087 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11088 return sizeof(struct buffer_control);
11091 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11096 /* each register contains 16 SC->VLnt mappings, 4 bits each */
11097 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11098 for (i = 0; i < sizeof(u64); i++) {
11099 u8 byte = *(((u8 *)&reg) + i);
11101 dp->vlnt[2 * i] = byte & 0xf;
11102 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11105 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11106 for (i = 0; i < sizeof(u64); i++) {
11107 u8 byte = *(((u8 *)&reg) + i);
11109 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11110 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11112 return sizeof(struct sc2vlnt);
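/*
 * Editor's worked example (illustrative register contents): if byte i of
 * DCC_CFG_SC_VL_TABLE_15_0 reads 0x52, then SC(2*i) maps to VLnt 0x2
 * (low nibble) and SC(2*i + 1) maps to VLnt 0x5 (high nibble).
 */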
11115 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11116 struct ib_vl_weight_elem *vl)
11120 for (i = 0; i < nelems; i++, vl++) {
11126 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11128 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11129 DC_SC_VL_VAL(15_0,
11130 0, dp->vlnt[0] & 0xf,
11131 1, dp->vlnt[1] & 0xf,
11132 2, dp->vlnt[2] & 0xf,
11133 3, dp->vlnt[3] & 0xf,
11134 4, dp->vlnt[4] & 0xf,
11135 5, dp->vlnt[5] & 0xf,
11136 6, dp->vlnt[6] & 0xf,
11137 7, dp->vlnt[7] & 0xf,
11138 8, dp->vlnt[8] & 0xf,
11139 9, dp->vlnt[9] & 0xf,
11140 10, dp->vlnt[10] & 0xf,
11141 11, dp->vlnt[11] & 0xf,
11142 12, dp->vlnt[12] & 0xf,
11143 13, dp->vlnt[13] & 0xf,
11144 14, dp->vlnt[14] & 0xf,
11145 15, dp->vlnt[15] & 0xf));
11146 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11147 DC_SC_VL_VAL(31_16,
11148 16, dp->vlnt[16] & 0xf,
11149 17, dp->vlnt[17] & 0xf,
11150 18, dp->vlnt[18] & 0xf,
11151 19, dp->vlnt[19] & 0xf,
11152 20, dp->vlnt[20] & 0xf,
11153 21, dp->vlnt[21] & 0xf,
11154 22, dp->vlnt[22] & 0xf,
11155 23, dp->vlnt[23] & 0xf,
11156 24, dp->vlnt[24] & 0xf,
11157 25, dp->vlnt[25] & 0xf,
11158 26, dp->vlnt[26] & 0xf,
11159 27, dp->vlnt[27] & 0xf,
11160 28, dp->vlnt[28] & 0xf,
11161 29, dp->vlnt[29] & 0xf,
11162 30, dp->vlnt[30] & 0xf,
11163 31, dp->vlnt[31] & 0xf));
11166 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11170 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11171 what, (int)limit, idx);
11174 /* change only the shared limit portion of SendCmGlobalCredit */
11175 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11179 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11180 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11181 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11182 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11185 /* change only the total credit limit portion of SendCmGlobalCredit */
11186 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11190 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11191 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11192 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11193 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11196 /* set the given per-VL shared limit */
11197 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11202 if (vl < TXE_NUM_DATA_VL)
11203 addr = SEND_CM_CREDIT_VL + (8 * vl);
11205 addr = SEND_CM_CREDIT_VL15;
11207 reg = read_csr(dd, addr);
11208 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11209 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11210 write_csr(dd, addr, reg);
11213 /* set the given per-VL dedicated limit */
11214 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11219 if (vl < TXE_NUM_DATA_VL)
11220 addr = SEND_CM_CREDIT_VL + (8 * vl);
11222 addr = SEND_CM_CREDIT_VL15;
11224 reg = read_csr(dd, addr);
11225 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11226 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11227 write_csr(dd, addr, reg);
11230 /* spin until the given per-VL status mask bits clear */
11231 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11234 unsigned long timeout;
11237 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11239 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11242 return; /* success */
11243 if (time_after(jiffies, timeout))
11244 break; /* timed out */
11249 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11250 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11252 * If this occurs, it is likely there was a credit loss on the link.
11253 * The only recovery from that is a link bounce.
11256 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
11260 * The number of credits on the VLs may be changed while everything
11261 * is "live", but the following algorithm must be followed due to
11262 * how the hardware is actually implemented. In particular,
11263 * Return_Credit_Status[] is the only correct status check.
11265 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11266 * set Global_Shared_Credit_Limit = 0
11268 * mask0 = all VLs that are changing either dedicated or shared limits
11269 * set Shared_Limit[mask0] = 0
11270 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11271 * if (changing any dedicated limit)
11272 * mask1 = all VLs that are lowering dedicated limits
11273 * lower Dedicated_Limit[mask1]
11274 * spin until Return_Credit_Status[mask1] == 0
11275 * raise Dedicated_Limits
11276 * raise Shared_Limits
11277 * raise Global_Shared_Credit_Limit
11279 * lower = if the new limit is lower, set the limit to the new value
11280 * raise = if the new limit is higher than the current value (may be changed
11281 * earlier in the algorithm), set the limit to the new value
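/*
 * Editor's walk-through of the algorithm above (illustrative values):
 * raising VL0 dedicated 64 -> 128 while lowering VL1 dedicated 32 -> 16
 * first zeroes the shared limits of every VL whose limits change, spins
 * on Return_Credit_Status for those VLs, lowers VL1 dedicated to 16 and
 * spins again, then raises VL0 dedicated to 128, re-raises the shared
 * limits, and finally re-raises Global_Shared_Credit_Limit.
 */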
11283 int set_buffer_control(struct hfi1_pportdata *ppd,
11284 struct buffer_control *new_bc)
11286 struct hfi1_devdata *dd = ppd->dd;
11287 u64 changing_mask, ld_mask, stat_mask;
11289 int i, use_all_mask;
11290 int this_shared_changing;
11291 int vl_count = 0, ret;
11293 * A0: add the variable any_shared_limit_changing below and in the
11294 * algorithm above. If removing A0 support, it can be removed.
11296 int any_shared_limit_changing;
11297 struct buffer_control cur_bc;
11298 u8 changing[OPA_MAX_VLS];
11299 u8 lowering_dedicated[OPA_MAX_VLS];
11302 const u64 all_mask =
11303 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11304 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11305 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11306 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11307 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11308 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11309 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11310 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11311 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11313 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11314 #define NUM_USABLE_VLS 16 /* look at VL15 and less */
11316 /* find the new total credits, do sanity check on unused VLs */
11317 for (i = 0; i < OPA_MAX_VLS; i++) {
11319 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11322 nonzero_msg(dd, i, "dedicated",
11323 be16_to_cpu(new_bc->vl[i].dedicated));
11324 nonzero_msg(dd, i, "shared",
11325 be16_to_cpu(new_bc->vl[i].shared));
11326 new_bc->vl[i].dedicated = 0;
11327 new_bc->vl[i].shared = 0;
11329 new_total += be16_to_cpu(new_bc->overall_shared_limit);
11331 /* fetch the current values */
11332 get_buffer_control(dd, &cur_bc, &cur_total);
11335 * Create the masks we will use.
11337 memset(changing, 0, sizeof(changing));
11338 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11340 * NOTE: Assumes that the individual VL bits are adjacent and in
11341 * increasing order.
11344 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11348 any_shared_limit_changing = 0;
11349 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11352 this_shared_changing = new_bc->vl[i].shared
11353 != cur_bc.vl[i].shared;
11354 if (this_shared_changing)
11355 any_shared_limit_changing = 1;
11356 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11357 this_shared_changing) {
11359 changing_mask |= stat_mask;
11362 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11363 be16_to_cpu(cur_bc.vl[i].dedicated)) {
11364 lowering_dedicated[i] = 1;
11365 ld_mask |= stat_mask;
11369 /* bracket the credit change with a total adjustment */
11370 if (new_total > cur_total)
11371 set_global_limit(dd, new_total);
11374 * Start the credit change algorithm.
11377 if ((be16_to_cpu(new_bc->overall_shared_limit) <
11378 be16_to_cpu(cur_bc.overall_shared_limit)) ||
11379 (is_ax(dd) && any_shared_limit_changing)) {
11380 set_global_shared(dd, 0);
11381 cur_bc.overall_shared_limit = 0;
11385 for (i = 0; i < NUM_USABLE_VLS; i++) {
11390 set_vl_shared(dd, i, 0);
11391 cur_bc.vl[i].shared = 0;
11395 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11398 if (change_count > 0) {
11399 for (i = 0; i < NUM_USABLE_VLS; i++) {
11403 if (lowering_dedicated[i]) {
11404 set_vl_dedicated(dd, i,
11405 be16_to_cpu(new_bc->
11407 cur_bc.vl[i].dedicated =
11408 new_bc->vl[i].dedicated;
11412 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11414 /* now raise all dedicated that are going up */
11415 for (i = 0; i < NUM_USABLE_VLS; i++) {
11419 if (be16_to_cpu(new_bc->vl[i].dedicated) >
11420 be16_to_cpu(cur_bc.vl[i].dedicated))
11421 set_vl_dedicated(dd, i,
11422 be16_to_cpu(new_bc->
11427 /* next raise all shared that are going up */
11428 for (i = 0; i < NUM_USABLE_VLS; i++) {
11432 if (be16_to_cpu(new_bc->vl[i].shared) >
11433 be16_to_cpu(cur_bc.vl[i].shared))
11434 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11437 /* finally raise the global shared */
11438 if (be16_to_cpu(new_bc->overall_shared_limit) >
11439 be16_to_cpu(cur_bc.overall_shared_limit))
11440 set_global_shared(dd,
11441 be16_to_cpu(new_bc->overall_shared_limit));
11443 /* bracket the credit change with a total adjustment */
11444 if (new_total < cur_total)
11445 set_global_limit(dd, new_total);
11448 * Determine the actual number of operational VLs using the number of
11449 * dedicated and shared credits for each VL.
11451 if (change_count > 0) {
11452 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11453 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11454 be16_to_cpu(new_bc->vl[i].shared) > 0)
11456 ppd->actual_vls_operational = vl_count;
11457 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11458 ppd->actual_vls_operational :
11459 ppd->vls_operational,
11462 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11463 ppd->actual_vls_operational :
11464 ppd->vls_operational, NULL);
11472 * Read the given fabric manager table. Return the size of the
11473 * table (in bytes) on success, and a negative error code on
11474 * failure.
11476 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11480 struct vl_arb_cache *vlc;
11483 case FM_TBL_VL_HIGH_ARB:
11486 * OPA specifies 128 elements (of 2 bytes each), though
11487 * HFI supports only 16 elements in h/w.
11489 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11490 vl_arb_get_cache(vlc, t);
11491 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11493 case FM_TBL_VL_LOW_ARB:
11496 * OPA specifies 128 elements (of 2 bytes each), though
11497 * HFI supports only 16 elements in h/w.
11499 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11500 vl_arb_get_cache(vlc, t);
11501 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11503 case FM_TBL_BUFFER_CONTROL:
11504 size = get_buffer_control(ppd->dd, t, NULL);
11506 case FM_TBL_SC2VLNT:
11507 size = get_sc2vlnt(ppd->dd, t);
11509 case FM_TBL_VL_PREEMPT_ELEMS:
11511 /* OPA specifies 128 elements, of 2 bytes each */
11512 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11514 case FM_TBL_VL_PREEMPT_MATRIX:
11517 * OPA specifies that this is the same size as the VL
11518 * arbitration tables (i.e., 256 bytes).
11528 * Write the given fabric manager table.
11530 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11533 struct vl_arb_cache *vlc;
11536 case FM_TBL_VL_HIGH_ARB:
11537 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11538 if (vl_arb_match_cache(vlc, t)) {
11539 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11542 vl_arb_set_cache(vlc, t);
11543 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11544 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11545 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11547 case FM_TBL_VL_LOW_ARB:
11548 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11549 if (vl_arb_match_cache(vlc, t)) {
11550 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11553 vl_arb_set_cache(vlc, t);
11554 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11555 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11556 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11558 case FM_TBL_BUFFER_CONTROL:
11559 ret = set_buffer_control(ppd, t);
11561 case FM_TBL_SC2VLNT:
11562 set_sc2vlnt(ppd->dd, t);
11571 * Disable all data VLs.
11573 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11575 static int disable_data_vls(struct hfi1_devdata *dd)
11580 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11586 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11587 * Just re-enables all data VLs (the "fill" part happens
11588 * automatically - the name was chosen for symmetry with
11589 * stop_drain_data_vls()).
11591 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11593 int open_fill_data_vls(struct hfi1_devdata *dd)
11598 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11604 * drain_data_vls() - assumes that disable_data_vls() has been called,
11605 * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA
11606 * engines to drop to 0.
11608 static void drain_data_vls(struct hfi1_devdata *dd)
11612 pause_for_credit_return(dd);
11616 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11618 * Use open_fill_data_vls() to resume using data VLs. This pair is
11619 * meant to be used like this:
11621 * stop_drain_data_vls(dd);
11622 * // do things with per-VL resources
11623 * open_fill_data_vls(dd);
11625 int stop_drain_data_vls(struct hfi1_devdata *dd)
11629 ret = disable_data_vls(dd);
11631 drain_data_vls(dd);
11637 * Convert a nanosecond time to a cclock count. No matter how slow
11638 * the cclock, a non-zero ns will always have a non-zero result.
11640 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11644 if (dd->icode == ICODE_FPGA_EMULATION)
11645 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11646 else /* simulation pretends to be ASIC */
11647 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11648 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11654 * Convert a cclock count to nanoseconds. No matter how slow
11655 * the cclock, a non-zero cclock count will always have a non-zero result.
11657 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11661 if (dd->icode == ICODE_FPGA_EMULATION)
11662 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11663 else /* simulation pretends to be ASIC */
11664 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11665 if (cclocks && !ns)
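/*
 * Editor's worked example (assuming an illustrative cclock period of
 * 800 ps; the real FPGA_CCLOCK_PS/ASIC_CCLOCK_PS values apply in the
 * driver): ns_to_cclock(dd, 1000) = (1000 * 1000) / 800 = 1250 cclocks,
 * and cclock_to_ns(dd, 1250) = (1250 * 800) / 1000 = 1000 ns. The
 * trailing guards bump a result of 0 up to 1 for any non-zero input.
 */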
11671 * Dynamically adjust the receive interrupt timeout for a context based on
11672 * incoming packet rate.
11674 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11676 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11678 struct hfi1_devdata *dd = rcd->dd;
11679 u32 timeout = rcd->rcvavail_timeout;
11682 * This algorithm doubles or halves the timeout depending on whether
11683 * the number of packets received in this interrupt was less than or
11684 * greater than or equal to the interrupt count.
11686 * The calculations below do not allow a steady state to be achieved.
11687 * Only at the endpoints is it possible to have an unchanging timeout.
11690 if (npkts < rcv_intr_count) {
11692 * Not enough packets arrived before the timeout, adjust
11693 * timeout downward.
11695 if (timeout < 2) /* already at minimum? */
11700 * More than enough packets arrived before the timeout, adjust
11703 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11705 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11708 rcd->rcvavail_timeout = timeout;
11710 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11711 * been verified to be in range
11713 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11715 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
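/*
 * A minimal standalone sketch of the doubling/halving policy used in
 * adjust_rcv_timeout() above (hypothetical helper, not driver code;
 * csr_max stands in for dd->rcv_intr_timeout_csr):
 */
static inline u32 next_rcv_timeout(u32 timeout, u32 npkts,
				   u32 intr_count, u32 csr_max)
{
	if (npkts < intr_count) {
		/* too few packets arrived: halve, but never go below 1 */
		if (timeout >= 2)
			timeout >>= 1;
	} else {
		/* packets arrived quickly: double, capped at the CSR max */
		if (timeout < csr_max)
			timeout = min(timeout << 1, csr_max);
	}
	return timeout;
}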
11718 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11719 u32 intr_adjust, u32 npkts)
11721 struct hfi1_devdata *dd = rcd->dd;
11723 u32 ctxt = rcd->ctxt;
11726 * Need to write timeout register before updating RcvHdrHead to ensure
11727 * that a new value is used when the HW decides to restart counting.
11730 adjust_rcv_timeout(rcd, npkts);
11732 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11733 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11734 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11737 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11738 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11739 << RCV_HDR_HEAD_HEAD_SHIFT);
11740 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11744 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11748 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11749 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11751 if (rcd->rcvhdrtail_kvaddr)
11752 tail = get_rcvhdrtail(rcd);
11754 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11756 return head == tail;
11760 * Context Control and Receive Array encoding for buffer size:
11769 * 0x8 512 KB (Receive Array only)
11770 * 0x9 1 MB (Receive Array only)
11771 * 0xa 2 MB (Receive Array only)
11773 * 0xB-0xF - reserved (Receive Array only)
11776 * This routine assumes that the value has already been sanity checked.
11778 static u32 encoded_size(u32 size)
11781 case 4 * 1024: return 0x1;
11782 case 8 * 1024: return 0x2;
11783 case 16 * 1024: return 0x3;
11784 case 32 * 1024: return 0x4;
11785 case 64 * 1024: return 0x5;
11786 case 128 * 1024: return 0x6;
11787 case 256 * 1024: return 0x7;
11788 case 512 * 1024: return 0x8;
11789 case 1 * 1024 * 1024: return 0x9;
11790 case 2 * 1024 * 1024: return 0xa;
11792 return 0x1; /* if invalid, go with the minimum size */
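/*
 * The mapping above is just the base-2 log of the size in 2 KB units:
 * size == 2048 << encoding for the encodings 0x1-0xa. A minimal
 * inverse sketch (hypothetical helper, not used by the driver):
 */
static inline u32 decoded_size(u32 encoding)
{
	return 2048u << encoding; /* 0x1 -> 4 KB ... 0xa -> 2 MB */
}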
11795 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11796 struct hfi1_ctxtdata *rcd)
11799 int did_enable = 0;
11807 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11809 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11810 /* if the context is already enabled, don't do the extra steps */
11811 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11812 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11813 /* reset the tail and hdr addresses, and sequence count */
11814 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11816 if (rcd->rcvhdrtail_kvaddr)
11817 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11818 rcd->rcvhdrqtailaddr_dma);
11821 /* reset the cached receive header queue head value */
11825 * Zero the receive header queue so we don't get false
11826 * positives when checking the sequence number. The
11827 * sequence numbers could land exactly on the same spot.
11828 * E.g. a rcd restart before the receive header queue wrapped.
11830 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11832 /* starting timeout */
11833 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11835 /* enable the context */
11836 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11838 /* clean the egr buffer size first */
11839 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11840 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11841 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11842 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11844 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11845 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11848 /* zero RcvEgrIndexHead */
11849 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11851 /* set eager count and base index */
11852 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11853 & RCV_EGR_CTRL_EGR_CNT_MASK)
11854 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11855 (((rcd->eager_base >> RCV_SHIFT)
11856 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11857 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11858 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11861 * Set TID (expected) count and base index.
11862 * rcd->expected_count is set to individual RcvArray entries,
11863 * not pairs, and the CSR takes a pair-count in groups of
11864 * four, so divide by 8.
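	 *
	 * Worked example (hypothetical count): expected_count = 2048
	 * RcvArray entries -> 1024 pairs -> 1024 / 4 = 256 groups of
	 * four pairs, i.e. 2048 >> 3.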
11866 reg = (((rcd->expected_count >> RCV_SHIFT)
11867 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11868 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11869 (((rcd->expected_base >> RCV_SHIFT)
11870 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11871 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11872 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11873 if (ctxt == HFI1_CTRL_CTXT)
11874 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11876 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11877 write_csr(dd, RCV_VL15, 0);
11879 * When a receive context is being disabled, turn on tail
11880 * update with a dummy tail address and then disable
11883 if (dd->rcvhdrtail_dummy_dma) {
11884 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11885 dd->rcvhdrtail_dummy_dma);
11886 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11887 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11890 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11892 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11893 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11894 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11895 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11896 if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
11897 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11898 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11899 /* See comment on RcvCtxtCtrl.TailUpd above */
11900 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11901 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11903 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11904 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11905 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11906 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11907 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11909 * In one-packet-per-eager mode, the size comes from
11910 * the RcvArray entry.
11912 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11913 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11915 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11916 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11917 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11918 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11919 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11920 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11921 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11922 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11923 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11924 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11925 rcd->rcvctrl = rcvctrl;
11926 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11927 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11929 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11931 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11932 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11934 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11936 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11937 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11938 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11939 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11940 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11941 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11942 ctxt, reg, reg == 0 ? "not" : "still");
11948 * The interrupt timeout and count must be set after
11949 * the context is enabled to take effect.
11951 /* set interrupt timeout */
11952 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11953 (u64)rcd->rcvavail_timeout <<
11954 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11956 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11957 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11958 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11961 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11963 * If the context has been disabled and the Tail Update has
11964 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
11965 * so it doesn't contain an address that is invalid.
11967 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11968 dd->rcvhdrtail_dummy_dma);
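/*
 * Typical usage (illustrative): enable a context and its
 * receive-available interrupt in one call, e.g.
 *
 *	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB |
 *			 HFI1_RCVCTRL_INTRAVAIL_ENB, rcd);
 */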
11971 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
11977 ret = dd->cntrnameslen;
11978 *namep = dd->cntrnames;
11980 const struct cntr_entry *entry;
11983 ret = (dd->ndevcntrs) * sizeof(u64);
11985 /* Get the start of the block of counters */
11986 *cntrp = dd->cntrs;
11989 * Now go and fill in each counter in the block.
11991 for (i = 0; i < DEV_CNTR_LAST; i++) {
11992 entry = &dev_cntrs[i];
11993 hfi1_cdbg(CNTR, "reading %s", entry->name);
11994 if (entry->flags & CNTR_DISABLED) {
11996 hfi1_cdbg(CNTR, "\tDisabled\n");
11998 if (entry->flags & CNTR_VL) {
11999 hfi1_cdbg(CNTR, "\tPer VL\n");
12000 for (j = 0; j < C_VL_COUNT; j++) {
12001 val = entry->rw_cntr(entry,
12007 "\t\tRead 0x%llx for %d\n",
12009 dd->cntrs[entry->offset + j] =
12012 } else if (entry->flags & CNTR_SDMA) {
12014 "\t Per SDMA Engine\n");
12015 for (j = 0; j < dd->chip_sdma_engines;
12018 entry->rw_cntr(entry, dd, j,
12021 "\t\tRead 0x%llx for %d\n",
12023 dd->cntrs[entry->offset + j] =
12027 val = entry->rw_cntr(entry, dd,
12030 dd->cntrs[entry->offset] = val;
12031 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12040 * Used by sysfs to create files for hfi stats to read
12042 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
12048 ret = ppd->dd->portcntrnameslen;
12049 *namep = ppd->dd->portcntrnames;
12051 const struct cntr_entry *entry;
12054 ret = ppd->dd->nportcntrs * sizeof(u64);
12055 *cntrp = ppd->cntrs;
12057 for (i = 0; i < PORT_CNTR_LAST; i++) {
12058 entry = &port_cntrs[i];
12059 hfi1_cdbg(CNTR, "reading %s", entry->name);
12060 if (entry->flags & CNTR_DISABLED) {
12062 hfi1_cdbg(CNTR, "\tDisabled\n");
12066 if (entry->flags & CNTR_VL) {
12067 hfi1_cdbg(CNTR, "\tPer VL");
12068 for (j = 0; j < C_VL_COUNT; j++) {
12069 val = entry->rw_cntr(entry, ppd, j,
12074 "\t\tRead 0x%llx for %d",
12076 ppd->cntrs[entry->offset + j] = val;
12079 val = entry->rw_cntr(entry, ppd,
12083 ppd->cntrs[entry->offset] = val;
12084 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12091 static void free_cntrs(struct hfi1_devdata *dd)
12093 struct hfi1_pportdata *ppd;
12096 if (dd->synth_stats_timer.data)
12097 del_timer_sync(&dd->synth_stats_timer);
12098 dd->synth_stats_timer.data = 0;
12099 ppd = (struct hfi1_pportdata *)(dd + 1);
12100 for (i = 0; i < dd->num_pports; i++, ppd++) {
12102 kfree(ppd->scntrs);
12103 free_percpu(ppd->ibport_data.rvp.rc_acks);
12104 free_percpu(ppd->ibport_data.rvp.rc_qacks);
12105 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12107 ppd->scntrs = NULL;
12108 ppd->ibport_data.rvp.rc_acks = NULL;
12109 ppd->ibport_data.rvp.rc_qacks = NULL;
12110 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12112 kfree(dd->portcntrnames);
12113 dd->portcntrnames = NULL;
12118 kfree(dd->cntrnames);
12119 dd->cntrnames = NULL;
12120 if (dd->update_cntr_wq) {
12121 destroy_workqueue(dd->update_cntr_wq);
12122 dd->update_cntr_wq = NULL;
12126 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12127 u64 *psval, void *context, int vl)
12132 if (entry->flags & CNTR_DISABLED) {
12133 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12137 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12139 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12141 /* If it's a synthetic counter there is more work we need to do */
12142 if (entry->flags & CNTR_SYNTH) {
12143 if (sval == CNTR_MAX) {
12144 /* No need to read already saturated */
12148 if (entry->flags & CNTR_32BIT) {
12149 /* 32bit counters can wrap multiple times */
12150 u64 upper = sval >> 32;
12151 u64 lower = (sval << 32) >> 32;
12153 if (lower > val) { /* hw wrapped */
12154 if (upper == CNTR_32BIT_MAX)
12160 if (val != CNTR_MAX)
12161 val = (upper << 32) | val;
12164 /* If we rolled we are saturated */
12165 if ((val < sval) || (val > CNTR_MAX))
12172 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
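/*
 * A minimal sketch of the 32-bit wrap handling above: the saved 64-bit
 * value supplies the upper half, and if the new hardware value is below
 * the saved lower half the counter must have wrapped since the last
 * read (hypothetical helper; the CNTR_MAX saturation checks are
 * omitted):
 */
static inline u64 extend_32bit_cntr(u64 sval, u64 hw)
{
	u64 upper = sval >> 32;
	u64 lower = (sval << 32) >> 32;

	if (lower > hw) /* hw wrapped */
		upper++;
	return (upper << 32) | hw;
}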
12177 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12178 struct cntr_entry *entry,
12179 u64 *psval, void *context, int vl, u64 data)
12183 if (entry->flags & CNTR_DISABLED) {
12184 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12188 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12190 if (entry->flags & CNTR_SYNTH) {
12192 if (entry->flags & CNTR_32BIT) {
12193 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12194 (data << 32) >> 32);
12195 val = data; /* return the full 64bit value */
12197 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12201 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12206 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12211 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12213 struct cntr_entry *entry;
12216 entry = &dev_cntrs[index];
12217 sval = dd->scntrs + entry->offset;
12219 if (vl != CNTR_INVALID_VL)
12222 return read_dev_port_cntr(dd, entry, sval, dd, vl);
12225 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12227 struct cntr_entry *entry;
12230 entry = &dev_cntrs[index];
12231 sval = dd->scntrs + entry->offset;
12233 if (vl != CNTR_INVALID_VL)
12236 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12239 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12241 struct cntr_entry *entry;
12244 entry = &port_cntrs[index];
12245 sval = ppd->scntrs + entry->offset;
12247 if (vl != CNTR_INVALID_VL)
12250 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12251 (index <= C_RCV_HDR_OVF_LAST)) {
12252 /* We do not want to bother for disabled contexts */
12256 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12259 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12261 struct cntr_entry *entry;
12264 entry = &port_cntrs[index];
12265 sval = ppd->scntrs + entry->offset;
12267 if (vl != CNTR_INVALID_VL)
12270 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12271 (index <= C_RCV_HDR_OVF_LAST)) {
12272 /* We do not want to bother for disabled contexts */
12276 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12279 static void do_update_synth_timer(struct work_struct *work)
12286 struct hfi1_pportdata *ppd;
12287 struct cntr_entry *entry;
12288 struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12292 * Rather than keep beating on the CSRs pick a minimal set that we can
12293 * check to watch for potential roll over. We can do this by looking at
12294 * the number of flits sent/received. If the total exceeds 32 bits then
12295 * we have to iterate all the counters and update.
12297 entry = &dev_cntrs[C_DC_RCV_FLITS];
12298 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12300 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12301 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12305 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12306 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12308 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12310 * May not be strictly necessary to update but it won't hurt and
12311 * simplifies the logic here.
12314 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12317 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12319 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12320 total_flits, (u64)CNTR_32BIT_MAX);
12321 if (total_flits >= CNTR_32BIT_MAX) {
12322 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12329 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12330 for (i = 0; i < DEV_CNTR_LAST; i++) {
12331 entry = &dev_cntrs[i];
12332 if (entry->flags & CNTR_VL) {
12333 for (vl = 0; vl < C_VL_COUNT; vl++)
12334 read_dev_cntr(dd, i, vl);
12336 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12339 ppd = (struct hfi1_pportdata *)(dd + 1);
12340 for (i = 0; i < dd->num_pports; i++, ppd++) {
12341 for (j = 0; j < PORT_CNTR_LAST; j++) {
12342 entry = &port_cntrs[j];
12343 if (entry->flags & CNTR_VL) {
12344 for (vl = 0; vl < C_VL_COUNT; vl++)
12345 read_port_cntr(ppd, j, vl);
12347 read_port_cntr(ppd, j, CNTR_INVALID_VL);
12353 * We want the value in the register. The goal is to keep track
12354 * of the number of "ticks" not the counter value. In other
12355 * words if the register rolls we want to notice it and go ahead
12356 * and force an update.
12358 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12359 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12362 entry = &dev_cntrs[C_DC_RCV_FLITS];
12363 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12366 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12367 dd->unit, dd->last_tx, dd->last_rx);
12370 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
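/*
 * A minimal sketch of the tripwire test above, assuming 64-bit
 * snapshots of the flit counters (hypothetical helper):
 */
static inline bool counters_need_full_update(u64 cur_tx, u64 cur_rx,
					     u64 last_tx, u64 last_rx)
{
	/* a tripwire counter itself rolled over: force an update */
	if (cur_tx < last_tx || cur_rx < last_rx)
		return true;
	/* enough flits moved that a 32-bit counter may have wrapped */
	return (cur_tx - last_tx) + (cur_rx - last_rx) >= CNTR_32BIT_MAX;
}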
12374 static void update_synth_timer(unsigned long opaque)
12376 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
12378 queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12379 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12382 #define C_MAX_NAME 16 /* 15 chars + one for \0 */
12383 static int init_cntrs(struct hfi1_devdata *dd)
12385 int i, rcv_ctxts, j;
12388 char name[C_MAX_NAME];
12389 struct hfi1_pportdata *ppd;
12390 const char *bit_type_32 = ",32";
12391 const int bit_type_32_sz = strlen(bit_type_32);
12393 /* set up the stats timer; the add_timer is done at the end */
12394 setup_timer(&dd->synth_stats_timer, update_synth_timer,
12395 (unsigned long)dd);
12397 /***********************/
12398 /* per device counters */
12399 /***********************/
12401 /* size names and determine how many we have */
12405 for (i = 0; i < DEV_CNTR_LAST; i++) {
12406 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12407 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12411 if (dev_cntrs[i].flags & CNTR_VL) {
12412 dev_cntrs[i].offset = dd->ndevcntrs;
12413 for (j = 0; j < C_VL_COUNT; j++) {
12414 snprintf(name, C_MAX_NAME, "%s%d",
12415 dev_cntrs[i].name, vl_from_idx(j));
12416 sz += strlen(name);
12417 /* Add ",32" for 32-bit counters */
12418 if (dev_cntrs[i].flags & CNTR_32BIT)
12419 sz += bit_type_32_sz;
12423 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12424 dev_cntrs[i].offset = dd->ndevcntrs;
12425 for (j = 0; j < dd->chip_sdma_engines; j++) {
12426 snprintf(name, C_MAX_NAME, "%s%d",
12427 dev_cntrs[i].name, j);
12428 sz += strlen(name);
12429 /* Add ",32" for 32-bit counters */
12430 if (dev_cntrs[i].flags & CNTR_32BIT)
12431 sz += bit_type_32_sz;
12436 /* +1 for newline. */
12437 sz += strlen(dev_cntrs[i].name) + 1;
12438 /* Add ",32" for 32-bit counters */
12439 if (dev_cntrs[i].flags & CNTR_32BIT)
12440 sz += bit_type_32_sz;
12441 dev_cntrs[i].offset = dd->ndevcntrs;
12446 /* allocate space for the counter values */
12447 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12451 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12455 /* allocate space for the counter names */
12456 dd->cntrnameslen = sz;
12457 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12458 if (!dd->cntrnames)
12461 /* fill in the names */
12462 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12463 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12465 } else if (dev_cntrs[i].flags & CNTR_VL) {
12466 for (j = 0; j < C_VL_COUNT; j++) {
12467 snprintf(name, C_MAX_NAME, "%s%d",
12470 memcpy(p, name, strlen(name));
12473 /* Counter is 32 bits */
12474 if (dev_cntrs[i].flags & CNTR_32BIT) {
12475 memcpy(p, bit_type_32, bit_type_32_sz);
12476 p += bit_type_32_sz;
12481 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12482 for (j = 0; j < dd->chip_sdma_engines; j++) {
12483 snprintf(name, C_MAX_NAME, "%s%d",
12484 dev_cntrs[i].name, j);
12485 memcpy(p, name, strlen(name));
12488 /* Counter is 32 bits */
12489 if (dev_cntrs[i].flags & CNTR_32BIT) {
12490 memcpy(p, bit_type_32, bit_type_32_sz);
12491 p += bit_type_32_sz;
12497 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12498 p += strlen(dev_cntrs[i].name);
12500 /* Counter is 32 bits */
12501 if (dev_cntrs[i].flags & CNTR_32BIT) {
12502 memcpy(p, bit_type_32, bit_type_32_sz);
12503 p += bit_type_32_sz;
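/*
 * Resulting device counter name table (illustrative, hypothetical
 * names): one name per line, per-VL and per-SDMA counters expanded
 * with their index, and 32-bit counters tagged with ",32":
 *
 *	RxFlitsVL0,32
 *	RxFlitsVL1,32
 *	TxWords
 */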
12510 /*********************/
12511 /* per port counters */
12512 /*********************/
12515 * Go through the counters for the overflows and disable the ones we
12516 * don't need. This varies based on platform so we need to do it
12517 * dynamically here.
12519 rcv_ctxts = dd->num_rcv_contexts;
12520 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12521 i <= C_RCV_HDR_OVF_LAST; i++) {
12522 port_cntrs[i].flags |= CNTR_DISABLED;
12525 /* size port counter names and determine how many we have */
12527 dd->nportcntrs = 0;
12528 for (i = 0; i < PORT_CNTR_LAST; i++) {
12529 if (port_cntrs[i].flags & CNTR_DISABLED) {
12530 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12534 if (port_cntrs[i].flags & CNTR_VL) {
12535 port_cntrs[i].offset = dd->nportcntrs;
12536 for (j = 0; j < C_VL_COUNT; j++) {
12537 snprintf(name, C_MAX_NAME, "%s%d",
12538 port_cntrs[i].name, vl_from_idx(j));
12539 sz += strlen(name);
12540 /* Add ",32" for 32-bit counters */
12541 if (port_cntrs[i].flags & CNTR_32BIT)
12542 sz += bit_type_32_sz;
12547 /* +1 for newline */
12548 sz += strlen(port_cntrs[i].name) + 1;
12549 /* Add ",32" for 32-bit counters */
12550 if (port_cntrs[i].flags & CNTR_32BIT)
12551 sz += bit_type_32_sz;
12552 port_cntrs[i].offset = dd->nportcntrs;
12557 /* allocate space for the counter names */
12558 dd->portcntrnameslen = sz;
12559 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12560 if (!dd->portcntrnames)
12563 /* fill in port cntr names */
12564 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12565 if (port_cntrs[i].flags & CNTR_DISABLED)
12568 if (port_cntrs[i].flags & CNTR_VL) {
12569 for (j = 0; j < C_VL_COUNT; j++) {
12570 snprintf(name, C_MAX_NAME, "%s%d",
12571 port_cntrs[i].name, vl_from_idx(j));
12572 memcpy(p, name, strlen(name));
12575 /* Counter is 32 bits */
12576 if (port_cntrs[i].flags & CNTR_32BIT) {
12577 memcpy(p, bit_type_32, bit_type_32_sz);
12578 p += bit_type_32_sz;
12584 memcpy(p, port_cntrs[i].name,
12585 strlen(port_cntrs[i].name));
12586 p += strlen(port_cntrs[i].name);
12588 /* Counter is 32 bits */
12589 if (port_cntrs[i].flags & CNTR_32BIT) {
12590 memcpy(p, bit_type_32, bit_type_32_sz);
12591 p += bit_type_32_sz;
12598 /* allocate per port storage for counter values */
12599 ppd = (struct hfi1_pportdata *)(dd + 1);
12600 for (i = 0; i < dd->num_pports; i++, ppd++) {
12601 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12605 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12610 /* CPU counters need to be allocated and zeroed */
12611 if (init_cpu_counters(dd))
12614 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12615 WQ_MEM_RECLAIM, dd->unit);
12616 if (!dd->update_cntr_wq)
12619 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12621 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12628 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12630 switch (chip_lstate) {
12633 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12637 return IB_PORT_DOWN;
12639 return IB_PORT_INIT;
12641 return IB_PORT_ARMED;
12642 case LSTATE_ACTIVE:
12643 return IB_PORT_ACTIVE;
12647 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12649 /* look at the HFI meta-states only */
12650 switch (chip_pstate & 0xf0) {
12652 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12656 return IB_PORTPHYSSTATE_DISABLED;
12658 return OPA_PORTPHYSSTATE_OFFLINE;
12660 return IB_PORTPHYSSTATE_POLLING;
12661 case PLS_CONFIGPHY:
12662 return IB_PORTPHYSSTATE_TRAINING;
12664 return IB_PORTPHYSSTATE_LINKUP;
12666 return IB_PORTPHYSSTATE_PHY_TEST;
12670 /* return the OPA port logical state name */
12671 const char *opa_lstate_name(u32 lstate)
12673 static const char * const port_logical_names[] = {
12679 "PORT_ACTIVE_DEFER",
12681 if (lstate < ARRAY_SIZE(port_logical_names))
12682 return port_logical_names[lstate];
12686 /* return the OPA port physical state name */
12687 const char *opa_pstate_name(u32 pstate)
12689 static const char * const port_physical_names[] = {
12696 "PHYS_LINK_ERR_RECOVER",
12703 if (pstate < ARRAY_SIZE(port_physical_names))
12704 return port_physical_names[pstate];
12708 static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
12711 * Set port status flags in the page mapped into userspace
12712 * memory. Do it here to ensure a reliable state - this is
12713 * the only function called by all state handling code.
12714 * Always set the flags because the cache value
12715 * might have been changed explicitly outside of this routine.
12718 if (ppd->statusp) {
12722 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12723 HFI1_STATUS_IB_READY);
12725 case IB_PORT_ARMED:
12726 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12728 case IB_PORT_ACTIVE:
12729 *ppd->statusp |= HFI1_STATUS_IB_READY;
12736 * wait_logical_linkstate - wait for an IB link state change to occur
12737 * @ppd: port device
12738 * @state: the state to wait for
12739 * @msecs: the number of milliseconds to wait
12741 * Wait up to msecs milliseconds for IB link state change to occur.
12742 * For now, take the easy polling route.
12743 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12745 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12748 unsigned long timeout;
12751 timeout = jiffies + msecs_to_jiffies(msecs);
12753 new_state = chip_to_opa_lstate(ppd->dd,
12754 read_logical_state(ppd->dd));
12755 if (new_state == state)
12757 if (time_after(jiffies, timeout)) {
12758 dd_dev_err(ppd->dd,
12759 "timeout waiting for link state 0x%x\n",
12766 update_statusp(ppd, state);
12767 dd_dev_info(ppd->dd,
12768 "logical state changed to %s (0x%x)\n",
12769 opa_lstate_name(state),
12774 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
12776 u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
12778 dd_dev_info(ppd->dd,
12779 "physical state changed to %s (0x%x), phy 0x%x\n",
12780 opa_pstate_name(ib_pstate), ib_pstate, state);
12784 * Read the physical hardware link state and check if it matches the
12785 * host driver's anticipated state.
12787 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
12789 u32 read_state = read_physical_state(ppd->dd);
12791 if (read_state == state) {
12792 log_state_transition(ppd, state);
12794 dd_dev_err(ppd->dd,
12795 "anticipated phy link state 0x%x, read 0x%x\n",
12796 state, read_state);
12801 * wait_physical_linkstate - wait for a physical link state change to occur
12802 * @ppd: port device
12803 * @state: the state to wait for
12804 * @msecs: the number of milliseconds to wait
12806 * Wait up to msecs milliseconds for physical link state change to occur.
12807 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12809 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12813 unsigned long timeout;
12815 timeout = jiffies + msecs_to_jiffies(msecs);
12817 read_state = read_physical_state(ppd->dd);
12818 if (read_state == state)
12820 if (time_after(jiffies, timeout)) {
12821 dd_dev_err(ppd->dd,
12822 "timeout waiting for phy link state 0x%x\n",
12826 usleep_range(1950, 2050); /* sleep 2ms-ish */
12829 log_state_transition(ppd, state);
12834 * wait_phys_link_offline_substates - wait for any offline substate
12835 * @ppd: port device
12836 * @msecs: the number of milliseconds to wait
12838 * Wait up to msecs milliseconds for any offline physical link
12839 * state change to occur.
12840 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
12842 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
12846 unsigned long timeout;
12848 timeout = jiffies + msecs_to_jiffies(msecs);
12850 read_state = read_physical_state(ppd->dd);
12851 if ((read_state & 0xF0) == PLS_OFFLINE)
12853 if (time_after(jiffies, timeout)) {
12854 dd_dev_err(ppd->dd,
12855 "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
12856 read_state, msecs);
12859 usleep_range(1950, 2050); /* sleep 2ms-ish */
12862 log_state_transition(ppd, read_state);
12866 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12867 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12869 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12870 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12872 void hfi1_init_ctxt(struct send_context *sc)
12875 struct hfi1_devdata *dd = sc->dd;
12877 u8 set = (sc->type == SC_USER ?
12878 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12879 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12880 reg = read_kctxt_csr(dd, sc->hw_context,
12881 SEND_CTXT_CHECK_ENABLE);
12883 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12885 SET_STATIC_RATE_CONTROL_SMASK(reg);
12886 write_kctxt_csr(dd, sc->hw_context,
12887 SEND_CTXT_CHECK_ENABLE, reg);
12891 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12896 if (dd->icode != ICODE_RTL_SILICON) {
12897 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12898 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12902 reg = read_csr(dd, ASIC_STS_THERM);
12903 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12904 ASIC_STS_THERM_CURR_TEMP_MASK);
12905 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12906 ASIC_STS_THERM_LO_TEMP_MASK);
12907 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12908 ASIC_STS_THERM_HI_TEMP_MASK);
12909 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12910 ASIC_STS_THERM_CRIT_TEMP_MASK);
12911 /* triggers is a 3-bit value - 1 bit per trigger. */
12912 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12917 /* ========================================================================= */
12920 * Enable/disable the chip from delivering interrupts.
12922 void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12927 * In HFI, the mask needs to be 1 to allow interrupts.
12930 /* enable all interrupts */
12931 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12932 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
12936 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12937 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
12942 * Clear all interrupt sources on the chip.
12944 static void clear_all_interrupts(struct hfi1_devdata *dd)
12948 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12949 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
12951 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12952 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12953 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12954 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12955 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12956 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12957 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12958 for (i = 0; i < dd->chip_send_contexts; i++)
12959 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12960 for (i = 0; i < dd->chip_sdma_engines; i++)
12961 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12963 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12964 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12965 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12968 /* Move to pcie.c? */
12969 static void disable_intx(struct pci_dev *pdev)
12975 * hfi1_clean_up_interrupts() - Free all IRQ resources
12976 * @dd: valid device data structure
12978 * Free the MSI or INTx IRQs and associated PCI resources,
12979 * if they have been allocated.
12981 void hfi1_clean_up_interrupts(struct hfi1_devdata *dd)
12985 /* remove irqs - must happen before disabling/turning off */
12986 if (dd->num_msix_entries) {
12988 struct hfi1_msix_entry *me = dd->msix_entries;
12990 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12991 if (!me->arg) /* => no irq, no affinity */
12993 hfi1_put_irq_affinity(dd, me);
12994 free_irq(me->irq, me->arg);
12997 /* clean structures */
12998 kfree(dd->msix_entries);
12999 dd->msix_entries = NULL;
13000 dd->num_msix_entries = 0;
13003 if (dd->requested_intx_irq) {
13004 free_irq(dd->pcidev->irq, dd);
13005 dd->requested_intx_irq = 0;
13007 disable_intx(dd->pcidev);
13010 pci_free_irq_vectors(dd->pcidev);
13014 * Remap the interrupt source from the general handler to the given MSI-X
13017 static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
13022 /* clear from the handled mask of the general interrupt */
13025 if (likely(m < CCE_NUM_INT_CSRS)) {
13026 dd->gi_mask[m] &= ~((u64)1 << n);
13028 dd_dev_err(dd, "remap interrupt err\n");
13032 /* direct the chip source to the given MSI-X interrupt */
13035 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
13036 reg &= ~((u64)0xff << (8 * n));
13037 reg |= ((u64)msix_intr & 0xff) << (8 * n);
13038 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
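/*
 * A minimal sketch of the index math above (a reconstruction; the lines
 * computing m and n are implied by the shifts): the general handler
 * mask uses 64 sources per CSR, while CCE_INT_MAP packs eight 8-bit map
 * fields per 64-bit CSR:
 */
static inline void int_src_indices(int isrc, int *mask_csr, int *mask_bit,
				   int *map_csr, int *map_byte)
{
	*mask_csr = isrc / 64;	/* which gi_mask/CCE_INT_* CSR */
	*mask_bit = isrc % 64;	/* which bit within it */
	*map_csr = isrc / 8;	/* which CCE_INT_MAP register */
	*map_byte = isrc % 8;	/* which byte within that register */
}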
13041 static void remap_sdma_interrupts(struct hfi1_devdata *dd,
13042 int engine, int msix_intr)
13045 * SDMA engine interrupt sources grouped by type, rather than
13046 * engine. Per-engine interrupts are as follows:
13051 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
13053 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
13055 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
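/*
 * Sketch of the source-index layout used above: interrupt type t
 * (0, 1, or 2) of engine e lives at a fixed stride (hypothetical
 * helper):
 */
static inline int sdma_int_source(int t, int e)
{
	return IS_SDMA_START + t * TXE_NUM_SDMA_ENGINES + e;
}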
13059 static int request_intx_irq(struct hfi1_devdata *dd)
13063 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
13065 ret = request_irq(dd->pcidev->irq, general_interrupt,
13066 IRQF_SHARED, dd->intx_name, dd);
13068 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
13071 dd->requested_intx_irq = 1;
13075 static int request_msix_irqs(struct hfi1_devdata *dd)
13077 int first_general, last_general;
13078 int first_sdma, last_sdma;
13079 int first_rx, last_rx;
13082 /* calculate the ranges we are going to use */
13084 last_general = first_general + 1;
13085 first_sdma = last_general;
13086 last_sdma = first_sdma + dd->num_sdma;
13087 first_rx = last_sdma;
13088 last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
13090 /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
13091 dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
13094 * Sanity check - the code expects all SDMA chip source
13095 * interrupts to be in the same CSR, starting at bit 0. Verify
13096 * that this is true by checking the bit location of the start.
13098 BUILD_BUG_ON(IS_SDMA_START % 64);
13100 for (i = 0; i < dd->num_msix_entries; i++) {
13101 struct hfi1_msix_entry *me = &dd->msix_entries[i];
13102 const char *err_info;
13103 irq_handler_t handler;
13104 irq_handler_t thread = NULL;
13107 struct hfi1_ctxtdata *rcd = NULL;
13108 struct sdma_engine *sde = NULL;
13110 /* obtain the arguments to request_irq */
13111 if (first_general <= i && i < last_general) {
13112 idx = i - first_general;
13113 handler = general_interrupt;
13115 snprintf(me->name, sizeof(me->name),
13116 DRIVER_NAME "_%d", dd->unit);
13117 err_info = "general";
13118 me->type = IRQ_GENERAL;
13119 } else if (first_sdma <= i && i < last_sdma) {
13120 idx = i - first_sdma;
13121 sde = &dd->per_sdma[idx];
13122 handler = sdma_interrupt;
13124 snprintf(me->name, sizeof(me->name),
13125 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
13127 remap_sdma_interrupts(dd, idx, i);
13128 me->type = IRQ_SDMA;
13129 } else if (first_rx <= i && i < last_rx) {
13130 idx = i - first_rx;
13131 rcd = hfi1_rcd_get_by_index(dd, idx);
13134 * Set the interrupt register and mask for this
13135 * context's interrupt.
13137 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13138 rcd->imask = ((u64)1) <<
13139 ((IS_RCVAVAIL_START + idx) % 64);
13140 handler = receive_context_interrupt;
13141 thread = receive_context_thread;
13143 snprintf(me->name, sizeof(me->name),
13144 DRIVER_NAME "_%d kctxt%d",
13146 err_info = "receive context";
13147 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
13148 me->type = IRQ_RCVCTXT;
13149 rcd->msix_intr = i;
13153 /* not in our expected range - complain, then
13157 "Unexpected extra MSI-X interrupt %d\n", i);
13160 /* no argument, no interrupt */
13163 /* make sure the name is terminated */
13164 me->name[sizeof(me->name) - 1] = 0;
13165 me->irq = pci_irq_vector(dd->pcidev, i);
13167 * On err return me->irq. Don't need to clear this
13168 * because 'arg' has not been set, and cleanup will
13169 * do the right thing.
13174 ret = request_threaded_irq(me->irq, handler, thread, 0,
13178 "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
13179 err_info, me->irq, idx, ret);
13183 * assign arg after request_irq call, so it will be
13188 ret = hfi1_get_irq_affinity(dd, me);
13190 dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
13196 void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
13200 if (!dd->num_msix_entries) {
13201 synchronize_irq(dd->pcidev->irq);
13205 for (i = 0; i < dd->vnic.num_ctxt; i++) {
13206 struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
13207 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13209 synchronize_irq(me->irq);
13213 void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13215 struct hfi1_devdata *dd = rcd->dd;
13216 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13218 if (!me->arg) /* => no irq, no affinity */
13221 hfi1_put_irq_affinity(dd, me);
13222 free_irq(me->irq, me->arg);
13227 void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13229 struct hfi1_devdata *dd = rcd->dd;
13230 struct hfi1_msix_entry *me;
13231 int idx = rcd->ctxt;
13235 rcd->msix_intr = dd->vnic.msix_idx++;
13236 me = &dd->msix_entries[rcd->msix_intr];
13239 * Set the interrupt register and mask for this
13240 * context's interrupt.
13242 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13243 rcd->imask = ((u64)1) <<
13244 ((IS_RCVAVAIL_START + idx) % 64);
13246 snprintf(me->name, sizeof(me->name),
13247 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
13248 me->name[sizeof(me->name) - 1] = 0;
13249 me->type = IRQ_RCVCTXT;
13250 me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
13252 dd_dev_err(dd, "vnic irq vector request (idx %d) fail %d\n",
13256 remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
13258 ret = request_threaded_irq(me->irq, receive_context_interrupt,
13259 receive_context_thread, 0, me->name, arg);
13261 dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
13262 me->irq, idx, ret);
13266 * assign arg after request_irq call, so it will be
13271 ret = hfi1_get_irq_affinity(dd, me);
13274 "unable to pin IRQ %d\n", ret);
13275 free_irq(me->irq, me->arg);
13280 * Set the general handler to accept all interrupts, remap all
13281 * chip interrupts back to MSI-X 0.
13283 static void reset_interrupts(struct hfi1_devdata *dd)
13287 /* all interrupts handled by the general handler */
13288 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13289 dd->gi_mask[i] = ~(u64)0;
13291 /* all chip interrupts map to MSI-X 0 */
13292 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13293 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13296 static int set_up_interrupts(struct hfi1_devdata *dd)
13300 int single_interrupt = 0; /* we expect to have all the interrupts */
13304 * 1 general, "slow path" interrupt (includes the SDMA engines
13305 * slow source, SDMACleanupDone)
13306 * N interrupts - one per used SDMA engine
13307 * M interrupts - one per kernel receive context
13308 * V interrupts - one for each VNIC context
13310 total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
13312 /* ask for MSI-X interrupts */
13313 request = request_msix(dd, total);
13317 } else if (request == 0) {
13319 /* dd->num_msix_entries already zero */
13320 single_interrupt = 1;
13321 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
13322 } else if (request < total) {
13323 /* using MSI-X, with reduced interrupts */
13324 dd_dev_err(dd, "reduced interrupt found, wanted %u, got %u\n",
13329 dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
13331 if (!dd->msix_entries) {
13336 dd->num_msix_entries = total;
13337 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
13340 /* mask all interrupts */
13341 set_intr_state(dd, 0);
13342 /* clear all pending interrupts */
13343 clear_all_interrupts(dd);
13345 /* reset general handler mask, chip MSI-X mappings */
13346 reset_interrupts(dd);
13348 if (single_interrupt)
13349 ret = request_intx_irq(dd);
13351 ret = request_msix_irqs(dd);
13358 hfi1_clean_up_interrupts(dd);
13363 * Set up context values in dd. Sets:
13365 * num_rcv_contexts - number of contexts being used
13366 * n_krcv_queues - number of kernel contexts
13367 * first_dyn_alloc_ctxt - first dynamically allocated context
13368 * in array of contexts
13369 * freectxts - number of free user contexts
13370 * num_send_contexts - number of PIO send contexts being used
13371 * num_vnic_contexts - number of contexts reserved for VNIC
13373 static int set_up_context_variables(struct hfi1_devdata *dd)
13375 unsigned long num_kernel_contexts;
13376 u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
13377 int total_contexts;
13381 int user_rmt_reduced;
13384 * Kernel receive contexts:
13385 * - Context 0 - control context (VL15/multicast/error)
13386 * - Context 1 - first kernel context
13387 * - Context 2 - second kernel context
13392 * n_krcvqs is the sum of module parameter kernel receive
13393 * contexts, krcvqs[]. It does not include the control
13394 * context, so add that.
13396 num_kernel_contexts = n_krcvqs + 1;
13398 num_kernel_contexts = DEFAULT_KRCVQS + 1;
13400 * Every kernel receive context needs an ACK send context.
13401 * One send context is allocated for each VL{0-7} and VL15.
13403 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
13405 "Reducing # kernel rcv contexts to: %d, from %lu\n",
13406 (int)(dd->chip_send_contexts - num_vls - 1),
13407 num_kernel_contexts);
13408 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
13411 /* Accommodate VNIC contexts if possible */
13412 if ((num_kernel_contexts + num_vnic_contexts) > dd->chip_rcv_contexts) {
13413 dd_dev_err(dd, "No receive contexts available for VNIC\n");
13414 num_vnic_contexts = 0;
13416 total_contexts = num_kernel_contexts + num_vnic_contexts;
13420 * - default to 1 user context per real (non-HT) CPU core if
13421 * num_user_contexts is negative
13423 if (num_user_contexts < 0)
13424 num_user_contexts =
13425 cpumask_weight(&node_affinity.real_cpu_mask);
13428 * Adjust the counts given a global max.
13430 if (total_contexts + num_user_contexts > dd->chip_rcv_contexts) {
13432 "Reducing # user receive contexts to: %d, from %d\n",
13433 (int)(dd->chip_rcv_contexts - total_contexts),
13434 (int)num_user_contexts);
13436 num_user_contexts = dd->chip_rcv_contexts - total_contexts;
13439 /* each user context requires an entry in the RMT */
13440 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
13441 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
13442 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
13444 "RMT size is reducing the number of user receive contexts from %d to %d\n",
13445 (int)num_user_contexts,
13448 num_user_contexts = user_rmt_reduced;
13451 total_contexts += num_user_contexts;
13453 /* the first N are kernel contexts, the rest are user/vnic contexts */
13454 dd->num_rcv_contexts = total_contexts;
13455 dd->n_krcv_queues = num_kernel_contexts;
13456 dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13457 dd->num_vnic_contexts = num_vnic_contexts;
13458 dd->num_user_contexts = num_user_contexts;
13459 dd->freectxts = num_user_contexts;
13461 "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
13462 (int)dd->chip_rcv_contexts,
13463 (int)dd->num_rcv_contexts,
13464 (int)dd->n_krcv_queues,
13465 dd->num_vnic_contexts,
13466 dd->num_user_contexts);
13469 * Receive array allocation:
13470 * All RcvArray entries are divided into groups of 8. This
13471 * is required by the hardware and will speed up writes to
13472 * consecutive entries by using write-combining of the entire
13475 * The number of groups is evenly divided among all contexts;
13476 * any left-over groups are given to the first N user
13479 dd->rcv_entries.group_size = RCV_INCREMENT;
13480 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
13481 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13482 dd->rcv_entries.nctxt_extra = ngroups -
13483 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13484 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13485 dd->rcv_entries.ngroups,
13486 dd->rcv_entries.nctxt_extra);
13487 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13488 MAX_EAGER_ENTRIES * 2) {
13489 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13490 dd->rcv_entries.group_size;
13492 "RcvArray group count too high, change to %u\n",
13493 dd->rcv_entries.ngroups);
13494 dd->rcv_entries.nctxt_extra = 0;
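	/*
	 * Worked example (hypothetical sizes): 32768 RcvArray entries
	 * with a group size of 8 gives 4096 groups; spread over 160
	 * receive contexts that is 25 groups per context, with
	 * 4096 - 160 * 25 = 96 left-over groups for the first user
	 * contexts.
	 */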
13497 * PIO send contexts
13499 ret = init_sc_pools_and_sizes(dd);
13500 if (ret >= 0) { /* success */
13501 dd->num_send_contexts = ret;
13504 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13505 dd->chip_send_contexts,
13506 dd->num_send_contexts,
13507 dd->sc_sizes[SC_KERNEL].count,
13508 dd->sc_sizes[SC_ACK].count,
13509 dd->sc_sizes[SC_USER].count,
13510 dd->sc_sizes[SC_VL15].count);
13511 ret = 0; /* success */
13518 * Set the device/port partition key table. The MAD code
13519 * will ensure that, at least, the partial management
13520 * partition key is present in the table.
13522 static void set_partition_keys(struct hfi1_pportdata *ppd)
13524 struct hfi1_devdata *dd = ppd->dd;
13528 dd_dev_info(dd, "Setting partition keys\n");
13529 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13530 reg |= (ppd->pkeys[i] &
13531 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13533 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13534 /* Each register holds 4 PKey values. */
13535 if ((i % 4) == 3) {
13536 write_csr(dd, RCV_PARTITION_KEY +
13537 ((i - 3) * 2), reg);
13542 /* Always enable HW pkeys check when pkeys table is set */
13543 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
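/*
 * A minimal sketch of the packing above, assuming 16-bit pkeys laid
 * out four per 64-bit register (hypothetical helper):
 */
static inline u64 pack_pkey_quad(const u16 *pkeys)
{
	u64 reg = 0;
	int i;

	for (i = 0; i < 4; i++)
		reg |= (u64)pkeys[i] << (i * 16);
	return reg;
}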
13547 * These CSRs and memories are uninitialized on reset and must be
13548 * written before reading to set the ECC/parity bits.
13550 * NOTE: All user context CSRs that are not mmapped write-only
13551 * (e.g. the TID flows) must be initialized even if the driver never
13554 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13559 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13560 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13562 /* SendCtxtCreditReturnAddr */
13563 for (i = 0; i < dd->chip_send_contexts; i++)
13564 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13566 /* PIO Send buffers */
13567 /* SDMA Send buffers */
13569 * These are not normally read, and (presently) have no method
13570 * to be read, so are not pre-initialized
13574 /* RcvHdrTailAddr */
13575 /* RcvTidFlowTable */
13576 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13577 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13578 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13579 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13580 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13584 for (i = 0; i < dd->chip_rcv_array_count; i++)
13585 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13587 /* RcvQPMapTable */
13588 for (i = 0; i < 32; i++)
13589 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13593 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13595 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13598 unsigned long timeout;
13601 /* is the condition present? */
13602 reg = read_csr(dd, CCE_STATUS);
13603 if ((reg & status_bits) == 0)
13606 /* clear the condition */
13607 write_csr(dd, CCE_CTRL, ctrl_bits);
13609 /* wait for the condition to clear */
13610 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13612 reg = read_csr(dd, CCE_STATUS);
13613 if ((reg & status_bits) == 0)
13615 if (time_after(jiffies, timeout)) {
13617 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13618 status_bits, reg & status_bits);
13625 /* set CCE CSRs to chip reset defaults */
13626 static void reset_cce_csrs(struct hfi1_devdata *dd)
13630 /* CCE_REVISION read-only */
13631 /* CCE_REVISION2 read-only */
13632 /* CCE_CTRL - bits clear automatically */
13633 /* CCE_STATUS read-only, use CceCtrl to clear */
13634 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13635 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13636 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13637 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13638 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13639 /* CCE_ERR_STATUS read-only */
13640 write_csr(dd, CCE_ERR_MASK, 0);
13641 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13642 /* CCE_ERR_FORCE leave alone */
13643 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13644 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13645 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13646 /* CCE_PCIE_CTRL leave alone */
13647 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13648 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13649 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13650 CCE_MSIX_TABLE_UPPER_RESETCSR);
13652 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13653 /* CCE_MSIX_PBA read-only */
13654 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13655 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13657 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13658 write_csr(dd, CCE_INT_MAP, 0);
13659 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13660 /* CCE_INT_STATUS read-only */
13661 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13662 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13663 /* CCE_INT_FORCE leave alone */
13664 /* CCE_INT_BLOCKED read-only */
13666 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13667 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13670 /* set MISC CSRs to chip reset defaults */
13671 static void reset_misc_csrs(struct hfi1_devdata *dd)
13675 for (i = 0; i < 32; i++) {
13676 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13677 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13678 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13681 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13682 * only be written in 128-byte chunks
13684 /* init RSA engine to clear lingering errors */
13685 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13686 write_csr(dd, MISC_CFG_RSA_MU, 0);
13687 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13688 /* MISC_STS_8051_DIGEST read-only */
13689 /* MISC_STS_SBM_DIGEST read-only */
13690 /* MISC_STS_PCIE_DIGEST read-only */
13691 /* MISC_STS_FAB_DIGEST read-only */
13692 /* MISC_ERR_STATUS read-only */
13693 write_csr(dd, MISC_ERR_MASK, 0);
13694 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13695 /* MISC_ERR_FORCE leave alone */
13698 /* set TXE CSRs to chip reset defaults */
13699 static void reset_txe_csrs(struct hfi1_devdata *dd)
13706 write_csr(dd, SEND_CTRL, 0);
13707 __cm_reset(dd, 0); /* reset CM internal state */
13708 /* SEND_CONTEXTS read-only */
13709 /* SEND_DMA_ENGINES read-only */
13710 /* SEND_PIO_MEM_SIZE read-only */
13711 /* SEND_DMA_MEM_SIZE read-only */
13712 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13713 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13714 /* SEND_PIO_ERR_STATUS read-only */
13715 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13716 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13717 /* SEND_PIO_ERR_FORCE leave alone */
13718 /* SEND_DMA_ERR_STATUS read-only */
13719 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13720 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13721 /* SEND_DMA_ERR_FORCE leave alone */
13722 /* SEND_EGRESS_ERR_STATUS read-only */
13723 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13724 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13725 /* SEND_EGRESS_ERR_FORCE leave alone */
13726 write_csr(dd, SEND_BTH_QP, 0);
13727 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13728 write_csr(dd, SEND_SC2VLT0, 0);
13729 write_csr(dd, SEND_SC2VLT1, 0);
13730 write_csr(dd, SEND_SC2VLT2, 0);
13731 write_csr(dd, SEND_SC2VLT3, 0);
13732 write_csr(dd, SEND_LEN_CHECK0, 0);
13733 write_csr(dd, SEND_LEN_CHECK1, 0);
13734 /* SEND_ERR_STATUS read-only */
13735 write_csr(dd, SEND_ERR_MASK, 0);
13736 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13737 /* SEND_ERR_FORCE read-only */
13738 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13739 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13740 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13741 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13742 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13743 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13744 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13745 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13746 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13747 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13748 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13749 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13750 /* SEND_CM_CREDIT_USED_STATUS read-only */
13751 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13752 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13753 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13754 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13755 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13756 for (i = 0; i < TXE_NUM_DATA_VL; i++)
13757 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13758 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13759 /* SEND_CM_CREDIT_USED_VL read-only */
13760 /* SEND_CM_CREDIT_USED_VL15 read-only */
13761 /* SEND_EGRESS_CTXT_STATUS read-only */
13762 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13763 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13764 /* SEND_EGRESS_ERR_INFO read-only */
13765 /* SEND_EGRESS_ERR_SOURCE read-only */
13768 * TXE Per-Context CSRs
13770 for (i = 0; i < dd->chip_send_contexts; i++) {
13771 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13772 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13773 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13774 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13775 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13776 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13777 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13778 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13779 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13780 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13781 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13782 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13786 * TXE Per-SDMA CSRs
13788 for (i = 0; i < dd->chip_sdma_engines; i++) {
13789 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13790 /* SEND_DMA_STATUS read-only */
13791 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13792 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13793 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13794 /* SEND_DMA_HEAD read-only */
13795 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13796 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13797 /* SEND_DMA_IDLE_CNT read-only */
13798 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13799 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13800 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13801 /* SEND_DMA_ENG_ERR_STATUS read-only */
13802 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13803 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13804 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13805 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13806 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13807 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13808 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13809 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13810 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13811 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
/*
 * Expect on entry:
 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
 */
static void init_rbufs(struct hfi1_devdata *dd)
{
	u64 reg;
	int count;

	/*
	 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
	 * clear.
	 */
	count = 0;
	while (1) {
		reg = read_csr(dd, RCV_STATUS);
		if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
			    | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
			break;
		/*
		 * Give up after 1ms - maximum wait time.
		 *
		 * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at
		 * 250MB/s bandwidth. Lower the rate to 66% for overhead
		 * to get:
		 *	136KiB / (66% * 250MB/s) = 844us
		 */
		if (count++ > 500) {
			dd_dev_err(dd,
				   "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
				   __func__, reg);
			break;
		}
		udelay(2); /* do not busy-wait the CSR */
	}

	/* start the init - expect RcvCtrl to be 0 */
	write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);

	/*
	 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
	 * period after the write before RcvStatus.RxRbufInitDone is valid.
	 * The delay in the first run through the loop below is sufficient and
	 * required before the first read of RcvStatus.RxRbufInitDone.
	 */
	read_csr(dd, RCV_CTRL);

	/* wait for the init to finish */
	count = 0;
	while (1) {
		/* delay is required first time through - see above */
		udelay(2); /* do not busy-wait the CSR */
		reg = read_csr(dd, RCV_STATUS);
		if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
			break;

		/* give up after 100us - slowest possible at 33MHz is 73us */
		if (count++ > 50) {
			dd_dev_err(dd,
				   "%s: RcvStatus.RxRbufInit not set, continuing\n",
				   __func__);
			break;
		}
	}
}
/* set RXE CSRs to chip reset defaults */
static void reset_rxe_csrs(struct hfi1_devdata *dd)
{
	int i, j;

	/*
	 * RXE Kernel and User Global CSRs
	 */
	write_csr(dd, RCV_CTRL, 0);
	init_rbufs(dd);
	/* RCV_STATUS read-only */
	/* RCV_CONTEXTS read-only */
	/* RCV_ARRAY_CNT read-only */
	/* RCV_BUF_SIZE read-only */
	write_csr(dd, RCV_BTH_QP, 0);
	write_csr(dd, RCV_MULTICAST, 0);
	write_csr(dd, RCV_BYPASS, 0);
	write_csr(dd, RCV_VL15, 0);
	/* this is a clear-down */
	write_csr(dd, RCV_ERR_INFO,
		  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
	/* RCV_ERR_STATUS read-only */
	write_csr(dd, RCV_ERR_MASK, 0);
	write_csr(dd, RCV_ERR_CLEAR, ~0ull);
	/* RCV_ERR_FORCE leave alone */
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
	for (i = 0; i < 4; i++)
		write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
	for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
		write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
	for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
		write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
	for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
		clear_rsm_rule(dd, i);
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
	/*
	 * RXE Kernel and User Per-Context CSRs
	 */
	for (i = 0; i < dd->chip_rcv_contexts; i++) {
		/* kernel */
		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
		/* RCV_CTXT_STATUS read-only */
		write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
		write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
		write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
		write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);

		/* user */
		/* RCV_HDR_TAIL read-only */
		write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
		/* RCV_EGR_INDEX_TAIL read-only */
		write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
		/* RCV_EGR_OFFSET_TAIL read-only */
		for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
			write_uctxt_csr(dd, i,
					RCV_TID_FLOW_TABLE + (8 * j), 0);
		}
	}
}
/*
 * Set sc2vl tables.
 *
 * They power on to zeros, so to avoid send context errors
 * they need to be set:
 *
 * SC 0-7 -> VL 0-7 (respectively)
 * SC 15  -> VL 15
 * otherwise
 *        -> VL 0
 */
static void init_sc2vl_tables(struct hfi1_devdata *dd)
{
	int i;

	/* init per architecture spec, constrained by hardware capability */

	/* HFI maps sent packets */
	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
		  0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7));
	write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
		  1, 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
	write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
		  2, 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0));
	write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
		  3, 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));

	/* DC maps received packets */
	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
		  15_0,
		  0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
		  8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
		  31_16,
		  16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
		  24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));

	/* initialize the cached sc2vl values consistently with h/w */
	for (i = 0; i < 32; i++) {
		if (i < 8 || i == 15)
			*((u8 *)(dd->sc2vl) + i) = (u8)i;
		else
			*((u8 *)(dd->sc2vl) + i) = 0;
	}
}
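/*
 * Each SC2VL_VAL() call above packs one table's worth of (SC, VL) pairs
 * into a single 64-bit CSR write; for example, table 1 maps SC8-SC14 to
 * VL0 and SC15 to VL15, which is exactly the policy stated before this
 * routine. The cached dd->sc2vl loop mirrors the same mapping byte for
 * byte so software lookups agree with the hardware tables.
 */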
/*
 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
 * depend on the chip going through a power-on reset - a driver may be loaded
 * and unloaded many times.
 *
 * Do not write any CSR values to the chip in this routine - there may be
 * a reset following the (possible) FLR in this routine.
 */
static int init_chip(struct hfi1_devdata *dd)
{
	int i;
	int ret = 0;

	/*
	 * Put the HFI CSRs in a known state.
	 * Combine this with a DC reset.
	 *
	 * Stop the device from doing anything while we do a
	 * reset. We know there are no other active users of
	 * the device since we are now in charge. Turn off
	 * all outbound and inbound traffic and make sure
	 * the device does not generate any interrupts.
	 */

	/* disable send contexts and SDMA engines */
	write_csr(dd, SEND_CTRL, 0);
	for (i = 0; i < dd->chip_send_contexts; i++)
		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
	for (i = 0; i < dd->chip_sdma_engines; i++)
		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
	/* disable port (turn off RXE inbound traffic) and contexts */
	write_csr(dd, RCV_CTRL, 0);
	/* per-context CSR, so use the kctxt accessor for each context */
	for (i = 0; i < dd->chip_rcv_contexts; i++)
		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
	/* mask all interrupt sources */
	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
		write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
	/*
	 * DC Reset: do a full DC reset before the register clear.
	 * A recommended length of time to hold is one CSR read,
	 * so reread the CceDcCtrl. Then, hold the DC in reset
	 * across the clear.
	 */
	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
	(void)read_csr(dd, CCE_DC_CTRL);
	if (use_flr) {
		/*
		 * A FLR will reset the SPC core and part of the PCIe.
		 * The parts that need to be restored have already been
		 * saved.
		 */
		dd_dev_info(dd, "Resetting CSRs with FLR\n");

		/* do the FLR, the DC reset will remain */
		pcie_flr(dd->pcidev);

		/* restore command and BARs */
		ret = restore_pci_variables(dd);
		if (ret) {
			dd_dev_err(dd, "%s: Could not restore PCI variables\n",
				   __func__);
			return ret;
		}

		if (is_ax(dd)) {
			dd_dev_info(dd, "Resetting CSRs with FLR\n");
			pcie_flr(dd->pcidev);
			ret = restore_pci_variables(dd);
			if (ret) {
				dd_dev_err(dd,
					   "%s: Could not restore PCI variables\n",
					   __func__);
				return ret;
			}
		}
	} else {
		dd_dev_info(dd, "Resetting CSRs with writes\n");
		reset_cce_csrs(dd);
		reset_txe_csrs(dd);
		reset_rxe_csrs(dd);
		reset_misc_csrs(dd);
	}
	/* clear the DC reset */
	write_csr(dd, CCE_DC_CTRL, 0);

	/* Set the LED off */
	setextled(dd, 0);

	/*
	 * Clear the QSFP reset.
	 * An FLR enforces a 0 on all out pins. The driver does not touch
	 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
	 * anything plugged in held constantly in reset, if it pays attention
	 * to RESET_N.
	 * Prime examples of this are optical cables. Set all pins high.
	 * I2CCLK and I2CDAT will change per direction, and INT_N and
	 * MODPRS_N are input only and their value is ignored.
	 */
	write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
	write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
	init_chip_resources(dd);
	return ret;
}
static void init_early_variables(struct hfi1_devdata *dd)
{
	int i;

	/* assign link credit variables */
	dd->vau = CM_VAU;
	dd->link_credits = CM_GLOBAL_CREDITS;
	if (is_ax(dd))
		dd->link_credits--;
	dd->vcu = cu_to_vcu(hfi1_cu);
	/* enough room for 8 MAD packets plus header - 17K */
	dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
	if (dd->vl15_init > dd->link_credits)
		dd->vl15_init = dd->link_credits;

	write_uninitialized_csrs_and_memories(dd);

	if (HFI1_CAP_IS_KSET(PKEY_CHECK))
		for (i = 0; i < dd->num_pports; i++) {
			struct hfi1_pportdata *ppd = &dd->pport[i];

			set_partition_keys(ppd);
		}
	init_sc2vl_tables(dd);
}
static void init_kdeth_qp(struct hfi1_devdata *dd)
{
	/* user changed the KDETH_QP */
	if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
		/* out of range or illegal value */
		dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
		kdeth_qp = 0;
	}
	if (kdeth_qp == 0)	/* not set, or failed range check */
		kdeth_qp = DEFAULT_KDETH_QP;

	write_csr(dd, SEND_BTH_QP,
		  (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
		  SEND_BTH_QP_KDETH_QP_SHIFT);

	write_csr(dd, RCV_BTH_QP,
		  (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
		  RCV_BTH_QP_KDETH_QP_SHIFT);
}
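/*
 * The prefix written above occupies the top byte of the 24-bit BTH QPN.
 * As an illustration, assuming the driver default DEFAULT_KDETH_QP is
 * 0x80, a packet with QPN 0x80xxxx is classified as KDETH traffic while
 * QPN 0x01xxxx is not. (The concrete prefix value comes from the module
 * parameter or the driver default, not from this routine.)
 */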
/**
 * init_qpmap_table
 * @dd - device data
 * @first_ctxt - first context
 * @last_ctxt - last context
 *
 * This routine sets the qpn mapping table that
 * is indexed by qpn[8:1].
 *
 * The routine will round robin the 256 settings
 * from first_ctxt to last_ctxt.
 *
 * The first/last looks ahead to having specialized
 * receive contexts for mgmt and bypass. Normal
 * verbs traffic will be assumed to be on a range
 * of receive contexts.
 */
static void init_qpmap_table(struct hfi1_devdata *dd,
			     u32 first_ctxt,
			     u32 last_ctxt)
{
	u64 reg = 0;
	u64 regno = RCV_QP_MAP_TABLE;
	int i;
	u64 ctxt = first_ctxt;

	for (i = 0; i < 256; i++) {
		reg |= ctxt << (8 * (i % 8));
		ctxt++;
		if (ctxt > last_ctxt)
			ctxt = first_ctxt;
		if (i % 8 == 7) {
			write_csr(dd, regno, reg);
			reg = 0;
			regno += 8;
		}
	}

	add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
		    | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
}
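/*
 * Worked example (illustrative values): with first_ctxt = 1 and
 * last_ctxt = 3, the 256 byte-wide entries are written 1, 2, 3, 1, 2, 3,
 * ..., so a packet with QPN 0x10 indexes entry qpn[8:1] = 8 and lands on
 * receive context 3.
 */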
struct rsm_map_table {
	u64 map[NUM_MAP_REGS];
	unsigned int used;
};

struct rsm_rule_data {
	u8 offset;
	u8 pkt_type;
	u32 field1_off;
	u32 field2_off;
	u32 index1_off;
	u32 index1_width;
	u32 index2_off;
	u32 index2_width;
	u32 mask1;
	u32 value1;
	u32 mask2;
	u32 value2;
};

/*
 * Return an initialized RMT map table for users to fill in. OK if it
 * returns NULL, indicating no table.
 */
static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
{
	struct rsm_map_table *rmt;
	u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */

	rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
	if (rmt) {
		memset(rmt->map, rxcontext, sizeof(rmt->map));
		rmt->used = 0;
	}

	return rmt;
}
/*
 * Write the final RMT map table to the chip. OK if the table is NULL,
 * indicating no table.
 */
static void complete_rsm_map_table(struct hfi1_devdata *dd,
				   struct rsm_map_table *rmt)
{
	int i;

	if (rmt) {
		/* write table to chip */
		for (i = 0; i < NUM_MAP_REGS; i++)
			write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i),
				  rmt->map[i]);

		/* enable RSM */
		add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
	}
}
/*
 * Add a receive side mapping rule.
 */
static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
			 struct rsm_rule_data *rrd)
{
	write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
		  (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
		  1ull << rule_index | /* enable bit */
		  (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
		  (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
		  (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
		  (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
		  (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
		  (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
		  (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
		  (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
		  (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
		  (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
		  (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
}

/*
 * Clear a receive side mapping rule.
 */
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
{
	write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
}
/* return the number of RSM map table entries that will be used for QOS */
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np)
{
	int i;
	unsigned int m, n;
	u8 max_by_vl = 0;

	/* is QOS active at all? */
	if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
	    num_vls == 1 ||
	    krcvqsset <= 1)
		goto no_qos;

	/* determine bits for qpn */
	for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
		if (krcvqs[i] > max_by_vl)
			max_by_vl = krcvqs[i];
	if (max_by_vl > 32)
		goto no_qos;
	m = ilog2(__roundup_pow_of_two(max_by_vl));

	/* determine bits for vl */
	n = ilog2(__roundup_pow_of_two(num_vls));

	/* reject if too much is used */
	if ((m + n) > 7)
		goto no_qos;

	if (mp)
		*mp = m;
	if (np)
		*np = n;

	return 1 << (m + n);

no_qos:
	if (mp)
		*mp = 0;
	if (np)
		*np = 0;
	return 0;
}
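/*
 * Worked example (illustrative settings, not defaults): with num_vls = 8
 * and krcvqs[] = {2, 2, 2, 2, 2, 2, 2, 2}, max_by_vl is 2 so m = 1,
 * n = ilog2(8) = 3, and the QOS rule consumes 1 << (1 + 3) = 16 RSM map
 * table entries.
 */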
/**
 * init_qos - init RX qos
 * @dd - device data
 * @rmt - RSM map table
 *
 * This routine initializes Rule 0 and the RSM map table to implement
 * quality of service (qos).
 *
 * If all of the limit tests succeed, qos is applied based on the array
 * interpretation of krcvqs where entry 0 is VL0.
 *
 * The number of vl bits (n) and the number of qpn bits (m) are computed to
 * feed both the RSM map table and the single rule.
 */
static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
{
	struct rsm_rule_data rrd;
	unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
	unsigned int rmt_entries;
	u64 reg;

	if (!rmt)
		goto bail;
	rmt_entries = qos_rmt_entries(dd, &m, &n);
	if (rmt_entries == 0)
		goto bail;
	qpns_per_vl = 1 << m;

	/* enough room in the map table? */
	rmt_entries = 1 << (m + n);
	if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
		goto bail;

	/* add qos entries to the RSM map table */
	for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
		unsigned tctxt;

		for (qpn = 0, tctxt = ctxt;
		     krcvqs[i] && qpn < qpns_per_vl; qpn++) {
			unsigned idx, regoff, regidx;

			/* generate the index the hardware will produce */
			idx = rmt->used + ((qpn << n) ^ i);
			regoff = (idx % 8) * 8;
			regidx = idx / 8;
			/* replace default with context number */
			reg = rmt->map[regidx];
			reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
				<< regoff);
			reg |= (u64)(tctxt++) << regoff;
			rmt->map[regidx] = reg;
			if (tctxt == ctxt + krcvqs[i])
				tctxt = ctxt;
		}
		ctxt += krcvqs[i];
	}

	rrd.offset = rmt->used;
	rrd.pkt_type = 2;
	rrd.field1_off = LRH_BTH_MATCH_OFFSET;
	rrd.field2_off = LRH_SC_MATCH_OFFSET;
	rrd.index1_off = LRH_SC_SELECT_OFFSET;
	rrd.index1_width = n;
	rrd.index2_off = QPN_SELECT_OFFSET;
	rrd.index2_width = m + n;
	rrd.mask1 = LRH_BTH_MASK;
	rrd.value1 = LRH_BTH_VALUE;
	rrd.mask2 = LRH_SC_MASK;
	rrd.value2 = LRH_SC_VALUE;

	/* add rule 0 */
	add_rsm_rule(dd, RSM_INS_VERBS, &rrd);

	/* mark RSM map entries as used */
	rmt->used += rmt_entries;
	/* map everything else to the mcast/err/vl15 context */
	init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
	dd->qos_shift = n + 1;
	return;
bail:
	dd->qos_shift = 1;
	init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
}
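/*
 * Worked instance of the index computation above (illustrative m = 2,
 * n = 3): the QPN bits sit above the n VL bits, so VL i = 1 with qpn = 3
 * produces idx = rmt->used + ((3 << 3) ^ 1) = rmt->used + 25, and the
 * byte at that map offset receives the next context in that VL's
 * round-robin set.
 */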
static void init_user_fecn_handling(struct hfi1_devdata *dd,
				    struct rsm_map_table *rmt)
{
	struct rsm_rule_data rrd;
	u64 reg;
	int i, idx, regoff, regidx;
	u8 offset;

	/* there needs to be enough room in the map table */
	if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
		dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
		return;
	}

	/*
	 * RSM will extract the destination context as an index into the
	 * map table. The destination contexts are a sequential block
	 * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
	 * Map entries are accessed as offset + extracted value. Adjust
	 * the added offset so this sequence can be placed anywhere in
	 * the table - as long as the entries themselves do not wrap.
	 * There are only enough bits in offset for the table size, so
	 * start with that to allow for a "negative" offset.
	 */
	offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
		      (int)dd->first_dyn_alloc_ctxt);

	for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
	     i < dd->num_rcv_contexts; i++, idx++) {
		/* replace with identity mapping */
		regoff = (idx % 8) * 8;
		regidx = idx / 8;
		reg = rmt->map[regidx];
		reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
		reg |= (u64)i << regoff;
		rmt->map[regidx] = reg;
	}

	/*
	 * For RSM intercept of Expected FECN packets:
	 * o packet type 0 - expected
	 * o match on F (bit 95), using select/match 1, and
	 * o match on SH (bit 133), using select/match 2.
	 *
	 * Use index 1 to extract the 8-bit receive context from DestQP
	 * (start at bit 64). Use that as the RSM map table index.
	 */
	rrd.offset = offset;
	rrd.pkt_type = 0;
	rrd.field1_off = 95;
	rrd.field2_off = 133;
	rrd.index1_off = 64;
	rrd.index1_width = 8;
	rrd.index2_off = 0;
	rrd.index2_width = 0;
	rrd.mask1 = 1;
	rrd.value1 = 1;
	rrd.mask2 = 1;
	rrd.value2 = 1;

	/* add rule 1 */
	add_rsm_rule(dd, RSM_INS_FECN, &rrd);

	rmt->used += dd->num_user_contexts;
}
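/*
 * The "negative" offset above is mod-256 arithmetic (the map table holds
 * NUM_MAP_ENTRIES = 32 regs * 8 = 256 byte-wide entries). Illustrative
 * values: with rmt->used = 40 and first_dyn_alloc_ctxt = 48, offset
 * becomes (u8)(256 + 40 - 48) = 248, and an extracted context of 48 then
 * selects entry (248 + 48) mod 256 = 40 - the first entry this rule owns.
 */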
/* Initialize RSM for VNIC */
void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
{
	u8 i, j;
	u8 ctx_id = 0;
	u64 reg;
	u32 regoff;
	struct rsm_rule_data rrd;

	if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
		dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
			   dd->vnic.rmt_start);
		return;
	}

	dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
		dd->vnic.rmt_start,
		dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);

	/* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
	regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
	reg = read_csr(dd, regoff);
	for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
		/* Update map register with vnic context */
		j = (dd->vnic.rmt_start + i) % 8;
		reg &= ~(0xffllu << (j * 8));
		reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
		/* Wrap up vnic ctx index */
		ctx_id %= dd->vnic.num_ctxt;
		/* Write back map register */
		if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
			dev_dbg(&(dd)->pcidev->dev,
				"Vnic rsm map reg[%d] =0x%llx\n",
				regoff - RCV_RSM_MAP_TABLE, reg);

			write_csr(dd, regoff, reg);
			regoff += 8;
			if (i < (NUM_VNIC_MAP_ENTRIES - 1))
				reg = read_csr(dd, regoff);
		}
	}

	/* Add rule for vnic */
	rrd.offset = dd->vnic.rmt_start;
	rrd.pkt_type = 4;
	/* Match 16B packets */
	rrd.field1_off = L2_TYPE_MATCH_OFFSET;
	rrd.mask1 = L2_TYPE_MASK;
	rrd.value1 = L2_16B_VALUE;
	/* Match ETH L4 packets */
	rrd.field2_off = L4_TYPE_MATCH_OFFSET;
	rrd.mask2 = L4_16B_TYPE_MASK;
	rrd.value2 = L4_16B_ETH_VALUE;
	/* Calc context from veswid and entropy */
	rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
	rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
	rrd.index2_off = L2_16B_ENTROPY_OFFSET;
	rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
	add_rsm_rule(dd, RSM_INS_VNIC, &rrd);

	/* Enable RSM if not already enabled */
	add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}
void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
{
	clear_rsm_rule(dd, RSM_INS_VNIC);

	/* Disable RSM if used only by vnic */
	if (dd->vnic.rmt_start == 0)
		clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}
static void init_rxe(struct hfi1_devdata *dd)
{
	struct rsm_map_table *rmt;
	u64 val;

	/* enable all receive errors */
	write_csr(dd, RCV_ERR_MASK, ~0ull);

	rmt = alloc_rsm_map_table(dd);
	/* set up QOS, including the QPN map table */
	init_qos(dd, rmt);
	init_user_fecn_handling(dd, rmt);
	complete_rsm_map_table(dd, rmt);
	/* record number of used rsm map entries for vnic */
	dd->vnic.rmt_start = rmt->used;
	kfree(rmt);

	/*
	 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
	 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
	 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
	 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
	 * Max_PayLoad_Size set to its minimum of 128.
	 *
	 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
	 * (64 bytes). Max_Payload_Size is possibly modified upward in
	 * tune_pcie_caps() which is called after this routine.
	 */

	/* Have 16 bytes (4DW) of bypass header available in header queue */
	val = read_csr(dd, RCV_BYPASS);
	val |= (4ull << 16);
	write_csr(dd, RCV_BYPASS, val);
}
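/*
 * On the write just above: the value 4 placed at bit 16 of RCV_BYPASS is
 * the bypass header size in DWs (4 DWs = the 16 bytes the comment names).
 * The field position is inferred from this write itself rather than
 * restated from the CSR specification.
 */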
static void init_other(struct hfi1_devdata *dd)
{
	/* enable all CCE errors */
	write_csr(dd, CCE_ERR_MASK, ~0ull);
	/* enable *some* Misc errors */
	write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
	/* enable all DC errors, except LCB */
	write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
	write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
}
/*
 * Fill out the given AU table using the given CU. A CU is defined in terms
 * of AUs. The table is an encoding: given the index, how many AUs does that
 * index represent?
 *
 * NOTE: Assumes that the register layout is the same for the
 * local and remote tables.
 */
static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
			       u32 csr0to3, u32 csr4to7)
{
	write_csr(dd, csr0to3,
		  0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
		  1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
		  2ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
		  4ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
	write_csr(dd, csr4to7,
		  8ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
		  16ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
		  32ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
		  64ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
}
static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
			   SEND_CM_LOCAL_AU_TABLE4_TO7);
}

void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
			   SEND_CM_REMOTE_AU_TABLE4_TO7);
}
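/*
 * Sketch of the resulting encoding, using cu = 1 for illustration: the
 * eight table entries decode to {0, 1, 2, 4, 8, 16, 32, 64} AUs, i.e.
 * entry i >= 2 represents (2^(i-1)) * cu AUs, so a 3-bit table index can
 * name credit amounts spanning two orders of magnitude.
 */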
static void init_txe(struct hfi1_devdata *dd)
{
	int i;

	/* enable all PIO, SDMA, general, and Egress errors */
	write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
	write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
	write_csr(dd, SEND_ERR_MASK, ~0ull);
	write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);

	/* enable all per-context and per-SDMA engine errors */
	for (i = 0; i < dd->chip_send_contexts; i++)
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
	for (i = 0; i < dd->chip_sdma_engines; i++)
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);

	/* set the local CU to AU mapping */
	assign_local_cm_au_table(dd, dd->vcu);

	/*
	 * Set reasonable default for Credit Return Timer
	 * Don't set on Simulator - causes it to choke.
	 */
	if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
		write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
}
int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
		       unsigned int jkey)
{
	u8 hw_ctxt;
	u64 reg;

	if (!rcd || !rcd->sc)
		return -EINVAL;

	hw_ctxt = rcd->sc->hw_context;
	reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
		((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
		 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
	/* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
	if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
		reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
	/*
	 * Enable send-side J_KEY integrity check, unless this is A0 h/w
	 */
	if (!is_ax(dd)) {
		reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
		reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
		write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
	}

	/* Enable J_KEY check on receive context. */
	reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
		((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
		 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
	write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);

	return 0;
}
int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u8 hw_ctxt;
	u64 reg;

	if (!rcd || !rcd->sc)
		return -EINVAL;

	hw_ctxt = rcd->sc->hw_context;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
	/*
	 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
	 * This check would not have been enabled for A0 h/w, see
	 * set_ctxt_jkey().
	 */
	if (!is_ax(dd)) {
		reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
		reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
		write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
	}
	/* Turn off the J_KEY on the receive side */
	write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);

	return 0;
}
int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
		       u16 pkey)
{
	u8 hw_ctxt;
	u64 reg;

	if (!rcd || !rcd->sc)
		return -EINVAL;

	hw_ctxt = rcd->sc->hw_context;
	reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
		SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
	reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
	reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);

	return 0;
}
int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
{
	u8 hw_ctxt;
	u64 reg;

	if (!ctxt || !ctxt->sc)
		return -EINVAL;

	hw_ctxt = ctxt->sc->hw_context;
	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
	reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);

	return 0;
}
/*
 * Start doing the clean up of the chip. Our clean up happens in multiple
 * stages and this is just the first.
 */
void hfi1_start_cleanup(struct hfi1_devdata *dd)
{
	aspm_exit(dd);
	free_cntrs(dd);
	free_rcverr(dd);
	finish_chip_resources(dd);
}
#define HFI_BASE_GUID(dev) \
	((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
/*
 * Information can be shared between the two HFIs on the same ASIC
 * in the same OS. This function finds the peer device and sets
 * up a shared structure.
 */
static int init_asic_data(struct hfi1_devdata *dd)
{
	unsigned long flags;
	struct hfi1_devdata *tmp, *peer = NULL;
	struct hfi1_asic_data *asic_data;
	int ret = 0;

	/* pre-allocate the asic structure in case we are the first device */
	asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
	if (!asic_data)
		return -ENOMEM;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	/* Find our peer device */
	list_for_each_entry(tmp, &hfi1_dev_list, list) {
		if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
		    dd->unit != tmp->unit) {
			peer = tmp;
			break;
		}
	}

	if (peer) {
		/* use already allocated structure */
		dd->asic_data = peer->asic_data;
		kfree(asic_data);
	} else {
		dd->asic_data = asic_data;
		mutex_init(&dd->asic_data->asic_resource_mutex);
	}
	dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	/* first one through - set up i2c devices */
	if (!peer)
		ret = set_up_i2c(dd, dd->asic_data);

	return ret;
}
/*
 * Set dd->boardname. Use a generic name if a name is not returned from
 * EFI variable space.
 *
 * Return 0 on success, -ENOMEM if space could not be allocated.
 */
static int obtain_boardname(struct hfi1_devdata *dd)
{
	/* generic board description */
	const char generic[] =
		"Intel Omni-Path Host Fabric Interface Adapter 100 Series";
	unsigned long size;
	int ret;

	ret = read_hfi1_efi_var(dd, "description", &size,
				(void **)&dd->boardname);
	if (ret) {
		dd_dev_info(dd, "Board description not found\n");
		/* use generic description */
		dd->boardname = kstrdup(generic, GFP_KERNEL);
		if (!dd->boardname)
			return -ENOMEM;
	}
	return 0;
}
/*
 * Check the interrupt registers to make sure that they are mapped correctly.
 * It is intended to help user identify any mismapping by VMM when the driver
 * is running in a VM. This function should only be called before interrupt
 * is set up properly.
 *
 * Return 0 on success, -EINVAL on failure.
 */
static int check_int_registers(struct hfi1_devdata *dd)
{
	u64 reg;
	u64 all_bits = ~(u64)0;
	u64 mask;

	/* Clear CceIntMask[0] to avoid raising any interrupts */
	mask = read_csr(dd, CCE_INT_MASK);
	write_csr(dd, CCE_INT_MASK, 0ull);
	reg = read_csr(dd, CCE_INT_MASK);
	if (reg)
		goto err_exit;

	/* Clear all interrupt status bits */
	write_csr(dd, CCE_INT_CLEAR, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg)
		goto err_exit;

	/* Set all interrupt status bits */
	write_csr(dd, CCE_INT_FORCE, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg != all_bits)
		goto err_exit;

	/* Restore the interrupt mask */
	write_csr(dd, CCE_INT_CLEAR, all_bits);
	write_csr(dd, CCE_INT_MASK, mask);

	return 0;
err_exit:
	write_csr(dd, CCE_INT_MASK, mask);
	dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
	return -EINVAL;
}
/*
 * Allocate and initialize the device structure for the hfi.
 * @pdev: the pci_dev for hfi1_ib device
 * @ent: pci_device_id struct for this dev
 *
 * Also allocates, initializes, and returns the devdata struct for this
 * device.
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	u64 reg;
	int i, ret;
	static const char * const inames[] = { /* implementation names */
		"RTL silicon",
		"RTL VCS simulation",
		"RTL FPGA emulation",
		"Functional simulator"
	};
	struct pci_dev *parent = pdev->bus->self;

	dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
				sizeof(struct hfi1_pportdata));
	if (IS_ERR(dd))
		goto bail;
	ppd = dd->pport;
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		int vl;

		/* init common fields */
		hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
		/* DC supports 4 link widths */
		ppd->link_width_supported =
			OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
			OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_supported =
			ppd->link_width_supported;
		/* start out enabling only 4X */
		ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_enabled =
			ppd->link_width_downgrade_supported;
		/* link width active is 0 when link is down */
		/* link width downgrade active is 0 when link is down */

		if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
		    num_vls > HFI1_MAX_VLS_SUPPORTED) {
			hfi1_early_err(&pdev->dev,
				       "Invalid num_vls %u, using %u VLs\n",
				       num_vls, HFI1_MAX_VLS_SUPPORTED);
			num_vls = HFI1_MAX_VLS_SUPPORTED;
		}
		ppd->vls_supported = num_vls;
		ppd->vls_operational = ppd->vls_supported;
		/* Set the default MTU. */
		for (vl = 0; vl < num_vls; vl++)
			dd->vld[vl].mtu = hfi1_max_mtu;
		dd->vld[15].mtu = MAX_MAD_PACKET;
		/*
		 * Set the initial values to reasonable default, will be set
		 * for real when link is up.
		 */
		ppd->overrun_threshold = 0x4;
		ppd->phy_error_threshold = 0xf;
		ppd->port_crc_mode_enabled = link_crc_mask;
		/* initialize supported LTP CRC mode */
		ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
		/* initialize enabled LTP CRC mode */
		ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
		/* start in offline */
		ppd->host_link_state = HLS_DN_OFFLINE;
		init_vl_arb_caches(ppd);
	}
	dd->link_default = HLS_DN_POLL;

	/*
	 * Do remaining PCIe setup and save PCIe values in dd.
	 * Any error printing is already done by the init code.
	 * On return, we have the chip mapped.
	 */
	ret = hfi1_pcie_ddinit(dd, pdev);
	if (ret < 0)
		goto bail_free;

	/* Save PCI space registers to rewrite after device reset */
	ret = save_pci_variables(dd);
	if (ret < 0)
		goto bail_cleanup;

	/* verify that reads actually work, save revision for reset check */
	dd->revision = read_csr(dd, CCE_REVISION);
	if (dd->revision == ~(u64)0) {
		dd_dev_err(dd, "cannot read chip CSRs\n");
		ret = -EINVAL;
		goto bail_cleanup;
	}
	dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MAJOR_MASK;
	dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MINOR_MASK;

	/*
	 * Check interrupt registers mapping if the driver has no access to
	 * the upstream component. In this case, it is likely that the driver
	 * is running in a VM.
	 */
	if (!parent) {
		ret = check_int_registers(dd);
		if (ret)
			goto bail_cleanup;
	}
	/*
	 * obtain the hardware ID - NOT related to unit, which is a
	 * software enumeration
	 */
	reg = read_csr(dd, CCE_REVISION2);
	dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
					& CCE_REVISION2_HFI_ID_MASK;
	/* the variable size will remove unwanted bits */
	dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
	dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
	dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
		    dd->icode < ARRAY_SIZE(inames) ?
		    inames[dd->icode] : "unknown", (int)dd->irev);
	/* speeds the hardware can support */
	dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
	/* speeds allowed to run at */
	dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
	/* give a reasonable active value, will be set on link up */
	dd->pport->link_speed_active = OPA_LINK_SPEED_25G;

	dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
	dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
	dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
	dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
	dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);

	/* fix up link widths for emulation _p */
	ppd = dd->pport;
	if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
		ppd->link_width_supported =
			ppd->link_width_enabled =
			ppd->link_width_downgrade_supported =
			ppd->link_width_downgrade_enabled =
				OPA_LINK_WIDTH_1X;
	}
	/* ensure num_vls isn't larger than number of sdma engines */
	if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
		dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
			   num_vls, dd->chip_sdma_engines);
		num_vls = dd->chip_sdma_engines;
		ppd->vls_supported = dd->chip_sdma_engines;
		ppd->vls_operational = ppd->vls_supported;
	}
	/*
	 * Convert the ns parameter to the 64 * cclocks used in the CSR.
	 * Limit the max if larger than the field holds. If timeout is
	 * non-zero, then the calculated field will be at least 1.
	 *
	 * Must be after icode is set up - the cclock rate depends
	 * on knowing the hardware being used.
	 */
	dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
	if (dd->rcv_intr_timeout_csr >
	    RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
		dd->rcv_intr_timeout_csr =
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
	else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
		dd->rcv_intr_timeout_csr = 1;

	/* needs to be done before we look for the peer device */
	read_guid(dd);

	/* set up shared ASIC data with peer device */
	ret = init_asic_data(dd);
	if (ret)
		goto bail_cleanup;
	/* obtain chip sizes, reset chip CSRs */
	ret = init_chip(dd);
	if (ret)
		goto bail_cleanup;

	/* read in the PCIe link speed information */
	ret = pcie_speeds(dd);
	if (ret)
		goto bail_cleanup;

	/* call before get_platform_config(), after init_chip_resources() */
	ret = eprom_init(dd);
	if (ret)
		goto bail_free_rcverr;

	/* Needs to be called before hfi1_firmware_init */
	get_platform_config(dd);

	/* read in firmware */
	ret = hfi1_firmware_init(dd);
	if (ret)
		goto bail_cleanup;
	/*
	 * In general, the PCIe Gen3 transition must occur after the
	 * chip has been idled (so it won't initiate any PCIe transactions
	 * e.g. an interrupt) and before the driver changes any registers
	 * (the transition will reset the registers).
	 *
	 * In particular, place this call after:
	 * - init_chip()          - the chip will not initiate any PCIe
	 *                          transactions
	 * - pcie_speeds()        - reads the current link speed
	 * - hfi1_firmware_init() - the needed firmware is ready to be
	 *                          downloaded
	 */
	ret = do_pcie_gen3_transition(dd);
	if (ret)
		goto bail_cleanup;

	/* start setting dd values and adjusting CSRs */
	init_early_variables(dd);

	parse_platform_config(dd);

	ret = obtain_boardname(dd);
	if (ret)
		goto bail_cleanup;

	snprintf(dd->boardversion, BOARD_VERS_MAX,
		 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
		 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
		 (u32)dd->majrev,
		 (u32)dd->minrev,
		 (dd->revision >> CCE_REVISION_SW_SHIFT)
		    & CCE_REVISION_SW_MASK);
	ret = set_up_context_variables(dd);
	if (ret)
		goto bail_cleanup;

	/* set initial RXE CSRs */
	init_rxe(dd);
	/* set initial TXE CSRs */
	init_txe(dd);
	/* set initial non-RXE, non-TXE CSRs */
	init_other(dd);
	/* set up KDETH QP prefix in both RX and TX CSRs */
	init_kdeth_qp(dd);

	ret = hfi1_dev_affinity_init(dd);
	if (ret)
		goto bail_cleanup;

	/* send contexts must be set up before receive contexts */
	ret = init_send_contexts(dd);
	if (ret)
		goto bail_cleanup;

	ret = hfi1_create_kctxts(dd);
	if (ret)
		goto bail_cleanup;

	/*
	 * Initialize aspm, to be done after gen3 transition and setting up
	 * contexts and before enabling interrupts
	 */
	aspm_init(dd);

	dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
	/*
	 * rcd[0] is guaranteed to be valid by this point. Also, all
	 * contexts are using the same value, as per the module parameter.
	 */
	dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);

	ret = init_pervl_scs(dd);
	if (ret)
		goto bail_cleanup;

	/* sdma init */
	for (i = 0; i < dd->num_pports; ++i) {
		ret = sdma_init(dd, i);
		if (ret)
			goto bail_cleanup;
	}

	/* use contexts created by hfi1_create_kctxts */
	ret = set_up_interrupts(dd);
	if (ret)
		goto bail_cleanup;

	/* set up LCB access - must be after set_up_interrupts() */
	init_lcb_access(dd);
	/*
	 * Serial number is created from the base guid:
	 * [27:24] = base guid [38:35]
	 * [23: 0] = base guid [23: 0]
	 */
	snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
		 (dd->base_guid & 0xFFFFFF) |
		 ((dd->base_guid >> 11) & 0xF000000));
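	/*
	 * The >> 11 above is how base guid bits [38:35] land in serial
	 * bits [27:24]: a right shift of 35 followed by a left shift of 24
	 * nets to >> 11, and the 0xF000000 mask keeps only the four
	 * relocated bits.
	 */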
	dd->oui1 = dd->base_guid >> 56 & 0xFF;
	dd->oui2 = dd->base_guid >> 48 & 0xFF;
	dd->oui3 = dd->base_guid >> 40 & 0xFF;

	ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
	if (ret)
		goto bail_clear_intr;

	thermal_init(dd);

	ret = init_cntrs(dd);
	if (ret)
		goto bail_clear_intr;

	ret = init_rcverr(dd);
	if (ret)
		goto bail_free_cntrs;

	init_completion(&dd->user_comp);

	/* The user refcount starts with one to indicate an active device */
	atomic_set(&dd->user_refcount, 1);

	goto bail;

bail_free_rcverr:
	free_rcverr(dd);
bail_free_cntrs:
	free_cntrs(dd);
bail_clear_intr:
	hfi1_clean_up_interrupts(dd);
bail_cleanup:
	hfi1_pcie_ddcleanup(dd);
bail_free:
	hfi1_free_devdata(dd);
	dd = ERR_PTR(ret);
bail:
	return dd;
}
static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
			u32 dw_len)
{
	u32 delta_cycles;
	u32 current_egress_rate = ppd->current_egress_rate;
	/* rates here are in units of 10^6 bits/sec */

	if (desired_egress_rate == -1)
		return 0; /* shouldn't happen */

	if (desired_egress_rate >= current_egress_rate)
		return 0; /* we can't help go faster, only slower */

	delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
		egress_cycles(dw_len * 4, current_egress_rate);

	return (u16)delta_cycles;
}
/**
 * create_pbc - build a pbc for transmission
 * @flags: special case flags or-ed in built pbc
 * @srate: static rate
 * @vl: vl
 * @dwlen: dword length (header words + data words + pbc words)
 *
 * Create a PBC with the given flags, rate, VL, and length.
 *
 * NOTE: The PBC created will not insert any HCRC - all callers but one are
 * for verbs, which does not use this PSM feature. The lone other caller
 * is for the diagnostic interface which calls this if the user does not
 * supply their own PBC.
 */
u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
	       u32 dw_len)
{
	u64 pbc, delay = 0;

	if (unlikely(srate_mbs))
		delay = delay_cycles(ppd, srate_mbs, dw_len);

	pbc = flags
		| (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
		| ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
		| (vl & PBC_VL_MASK) << PBC_VL_SHIFT
		| (dw_len & PBC_LENGTH_DWS_MASK)
			<< PBC_LENGTH_DWS_SHIFT;

	return pbc;
}
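/*
 * Typical use, mirroring the parameters documented above: a verbs caller
 * with no static-rate throttling passes srate_mbs = 0 (the unlikely()
 * case), e.g.
 *
 *	pbc = create_pbc(ppd, 0, 0, vl, dwlen);
 *
 * which yields a PBC with zero delay cycles and no HCRC insertion.
 */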
#define SBUS_THERMAL		0x4f
#define SBUS_THERM_MONITOR_MODE	0x1

#define THERM_FAILURE(dev, ret, reason) \
	dd_dev_err((dd),						\
		   "Thermal sensor initialization failed: %s (%d)\n",	\
		   (reason), (ret))

/*
 * Initialize the thermal sensor.
 *
 * After initialization, enable polling of thermal sensor through
 * SBus interface. In order for this to work, the SBus Master
 * firmware has to be loaded due to the fact that the HW polling
 * logic uses SBus interrupts, which are not supported with
 * default firmware. Otherwise, no data will be returned through
 * the ASIC_STS_THERM CSR.
 */
static int thermal_init(struct hfi1_devdata *dd)
{
	int ret = 0;

	if (dd->icode != ICODE_RTL_SILICON ||
	    check_chip_resource(dd, CR_THERM_INIT, NULL))
		return ret;

	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
	if (ret) {
		THERM_FAILURE(dd, ret, "Acquire SBus");
		return ret;
	}

	dd_dev_info(dd, "Initializing thermal sensor\n");
	/* Disable polling of thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
	msleep(100);
	/* Thermal Sensor Initialization */
	/* Step 1: Reset the Thermal SBus Receiver */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				RESET_SBUS_RECEIVER, 0);
	if (ret) {
		THERM_FAILURE(dd, ret, "Bus Reset");
		goto done;
	}
	/* Step 2: Set Reset bit in Thermal block */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x1);
	if (ret) {
		THERM_FAILURE(dd, ret, "Therm Block Reset");
		goto done;
	}
	/* Step 3: Write clock divider value (100MHz -> 2MHz) */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
				WRITE_SBUS_RECEIVER, 0x32);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Clock Div");
		goto done;
	}
	/* Step 4: Select temperature mode */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
				WRITE_SBUS_RECEIVER,
				SBUS_THERM_MONITOR_MODE);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Mode Sel");
		goto done;
	}
	/* Step 5: De-assert block reset and start conversion */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x2);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Reset Deassert");
		goto done;
	}
	/* Step 5.1: Wait for first conversion (21.5ms per spec) */
	msleep(22);

	/* Enable polling of thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);

	/* Set initialized flag */
	ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
	if (ret)
		THERM_FAILURE(dd, ret, "Unable to set thermal init flag");

done:
	release_chip_resource(dd, CR_SBUS);
	return ret;
}
static void handle_temp_err(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = &dd->pport[0];
	/*
	 * Thermal Critical Interrupt
	 * Put the device into forced freeze mode, take link down to
	 * offline, and put DC into reset.
	 */
	dd_dev_emerg(dd,
		     "Critical temperature reached! Forcing device into freeze mode!\n");
	dd->flags |= HFI1_FORCED_FREEZE;
	start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
	/*
	 * Shut DC down as much and as quickly as possible.
	 *
	 * Step 1: Take the link down to OFFLINE. This will cause the
	 *	   8051 to put the Serdes in reset. However, we don't want to
	 *	   go through the entire link state machine since we want to
	 *	   shutdown ASAP. Furthermore, this is not a graceful shutdown
	 *	   but rather an attempt to save the chip.
	 *	   Code below is almost the same as quiet_serdes() but avoids
	 *	   all the extra work and the sleeps.
	 */
	ppd->driver_link_ready = 0;
	ppd->link_enabled = 0;
	set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
				PLS_OFFLINE);
	/*
	 * Step 2: Shutdown LCB and 8051
	 *	   After shutdown, do not restore DC_CFG_RESET value.