// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2015 - 2020 Intel Corporation.
 * Copyright(c) 2021 Cornelis Networks.
 *
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
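/*
 * Illustrative sketch only (hypothetical helper, not used by the driver):
 * the 824 above is the transfer time of one packet plus header at the
 * 12.5 GB/s link rate. A byte takes 0.08 ns at that rate, so in integer
 * math the conversion is bytes * 2 / 25.
 */
static inline uint pkt_bytes_to_ns_12_5_gbps(uint bytes)
{
	return (bytes * 2) / 25;	/* e.g. 10304 bytes -> 824 ns */
}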
uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */
struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
};
/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
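/*
 * For example (illustrative only),
 *	FLAG_ENTRY0("CceCsrParityErr", CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK)
 * expands to the initializer
 *	{ CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK, "CceCsrParityErr", 0 }
 */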
/* Send Error Consequences */
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4	/* per-context only */
#define SEC_SPC_FREEZE		0x8	/* per-HFI only */

#define DEFAULT_KRCVQS		2
#define MIN_KERNEL_KCTXTS	2
#define FIRST_KERNEL_KCTXT	1
/*
 * RSM instance allocation
 *   0 - User Fecn Handling
 *   1 - Vnic
 *   2 - AIP
 *   3 - Verbs
 */
#define RSM_INS_FECN	0
#define RSM_INS_VNIC	1
#define RSM_INS_AIP	2
#define RSM_INS_VERBS	3
/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT	39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
/* RSM fields for Verbs */
#define IB_PACKET_TYPE		2ull
#define QW_SHIFT		6ull
#define QPN_WIDTH		7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW		0ull
#define LRH_BTH_BIT_OFFSET	48ull
#define LRH_BTH_OFFSET(off)	((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET	LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK		3ull
#define LRH_BTH_VALUE		2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW		0ull
#define LRH_SC_BIT_OFFSET	56ull
#define LRH_SC_OFFSET(off)	((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET	LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK		128ull
#define LRH_SC_VALUE		0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET	((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET	((1ull << QW_SHIFT) | (1ull))
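/*
 * Illustrative sketch (hypothetical helper, not used by the driver): every
 * RSM match/select offset above packs a quad-word index and a bit offset
 * within that quad word into a single field.
 */
static inline u64 rsm_field_offset(u64 qw, u64 bit)
{
	return (qw << QW_SHIFT) | bit;	/* e.g. (1, 1) == QPN_SELECT_OFFSET */
}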
/* RSM fields for AIP */
/* LRH.BTH above is reused for this rule */

/* BTH.DESTQP: QW 1, OFFSET 16 - for match */
#define BTH_DESTQP_QW		1ull
#define BTH_DESTQP_BIT_OFFSET	16ull
#define BTH_DESTQP_OFFSET(off)	((BTH_DESTQP_QW << QW_SHIFT) | (off))
#define BTH_DESTQP_MATCH_OFFSET	BTH_DESTQP_OFFSET(BTH_DESTQP_BIT_OFFSET)
#define BTH_DESTQP_MASK		0xFFull
#define BTH_DESTQP_VALUE	0x81ull

/* DETH.SQPN: QW 1, OFFSET 56 - for select */
/* We use the 8 most significant Source QPN bits as entropy for AIP */
#define DETH_AIP_SQPN_QW	3ull
#define DETH_AIP_SQPN_BIT_OFFSET	56ull
#define DETH_AIP_SQPN_OFFSET(off) ((DETH_AIP_SQPN_QW << QW_SHIFT) | (off))
#define DETH_AIP_SQPN_SELECT_OFFSET \
	DETH_AIP_SQPN_OFFSET(DETH_AIP_SQPN_BIT_OFFSET)
/* RSM fields for Vnic */
/* L2_TYPE: QW 0, OFFSET 61 - for match */
#define L2_TYPE_QW		0ull
#define L2_TYPE_BIT_OFFSET	61ull
#define L2_TYPE_OFFSET(off)	((L2_TYPE_QW << QW_SHIFT) | (off))
#define L2_TYPE_MATCH_OFFSET	L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
#define L2_TYPE_MASK		3ull
#define L2_16B_VALUE		2ull

/* L4_TYPE QW 1, OFFSET 0 - for match */
#define L4_TYPE_QW		1ull
#define L4_TYPE_BIT_OFFSET	0ull
#define L4_TYPE_OFFSET(off)	((L4_TYPE_QW << QW_SHIFT) | (off))
#define L4_TYPE_MATCH_OFFSET	L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
#define L4_16B_TYPE_MASK	0xFFull
#define L4_16B_ETH_VALUE	0x78ull

/* 16B VESWID - for select */
#define L4_16B_HDR_VESWID_OFFSET	((2 << QW_SHIFT) | (16ull))
/* 16B ENTROPY - for select */
#define L2_16B_ENTROPY_OFFSET		((1 << QW_SHIFT) | (32ull))
/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, sc1, sc1val, sc2, sc2val, sc3, sc3val, \
	sc4, sc4val, sc5, sc5val, sc6, sc6val, sc7, sc7val) \
(	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT))
#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, e1, e1val, e2, e2val, e3, e3val, \
	e4, e4val, e5, e5val, e6, e6val, e7, e7val, \
	e8, e8val, e9, e9val, e10, e10val, e11, e11val, \
	e12, e12val, e13, e13val, e14, e14val, e15, e15val) \
(	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT))
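/*
 * Illustrative expansion (hypothetical arguments): with range 15_0 and
 * entry pair (0, 1), DC_SC_VL_VAL pastes together shift names such as
 * DCC_CFG_SC_VL_TABLE_15_0_ENTRY0_SHIFT, packing sixteen SC-to-VL table
 * entries into a single 64-bit CSR value.
 */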
/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
		| CCE_STATUS_RXE_FROZE_SMASK \
		| CCE_STATUS_TXE_FROZE_SMASK \
		| CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
		| CCE_STATUS_TXE_PAUSED_SMASK \
		| CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
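/*
 * Illustrative sketch (hypothetical helper): 32-bit hardware counters wrap
 * at CNTR_32BIT_MAX, so software totals are kept in 64 bits and advanced by
 * deltas; unsigned 32-bit subtraction handles the wrap naturally.
 */
static inline u64 accumulate_u32_cntr(u64 total, u32 prev, u32 now)
{
	return total + (u32)(now - prev);
}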
static struct flag_table cce_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CceCsrParityErr",
		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/	FLAG_ENTRY0("LATriggered",
		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/	FLAG_ENTRY0("CceIntMapCorErr",
		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/	FLAG_ENTRY0("CceIntMapUncErr",
		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK)
};
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
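/*
 * For example (illustrative only), MES(CSR_PARITY) expands to
 * MISC_ERR_STATUS_MISC_CSR_PARITY_ERR_SMASK.
 */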
static struct flag_table misc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};
/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
	SEC_WRITE_DROPPED,
	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/	FLAG_ENTRY("PioWriteAddrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY("PioCsrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("PioSbMemFifo0",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/	FLAG_ENTRY("PioSbMemFifo1",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/	FLAG_ENTRY("PioPccFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY("PioPecFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY("PioSmPktResetParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/	FLAG_ENTRY("PioPpmcPblFifo",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/	FLAG_ENTRY("PioInitSmIn",
	0,
	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/	FLAG_ENTRY("PioHostAddrMemCor",
	0,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/	FLAG_ENTRY("PioWriteDataParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/	FLAG_ENTRY("PioStateMachine",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/	FLAG_ENTRY("PioWriteQwValidParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/	FLAG_ENTRY("PioBlockQwCountParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/	FLAG_ENTRY("PioVlfVlLenParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/	FLAG_ENTRY("PioVlfSopParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/	FLAG_ENTRY("PioVlFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY("PioPpmcSopLen",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/	FLAG_ENTRY("PioPccSopHeadParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
/*36-63 reserved*/
};
/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK)
};
/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};
/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/	FLAG_ENTRY0("Reserved", 0ull),
/* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};
/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))
/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};
/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("InconsistentSop",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/	FLAG_ENTRY("DisallowedPacket",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("WriteOverflow",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/	FLAG_ENTRY("WriteOutOfBounds",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
};
/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};
/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		LCBE(REDUNDANT_FLIT_PARITY_ERR))
};
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};
/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		EXTERNAL_DEVICE_REQ_TIMEOUT),
};
/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
	FLAG_ENTRY0("BC SMA message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
	FLAG_ENTRY0("Link width downgraded", 0x0200),
};
static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
				    u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
					    int msecs);
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				   int msecs);
static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
					 int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);
/*
 * Error interrupt table entry. This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)
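/*
 * Illustrative sketch (hypothetical routine, not the driver's actual
 * handler): a second-tier "clear down" based on err_reg_info reads the
 * status CSR, acks what it saw through the clear CSR, and dispatches to
 * the entry's handler. Assumes the read_csr()/write_csr() helpers
 * declared for this file.
 */
static void example_clear_down(struct hfi1_devdata *dd,
			       const struct err_reg_info *eri, u32 source)
{
	u64 reg = read_csr(dd, eri->status);

	write_csr(dd, eri->clear, reg);	/* ack the bits just read */
	if (reg && eri->handler)
		eri->handler(dd, source, reg);
}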
/*
 * Helpers for building HFI and DC error interrupt table entries. Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
		handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
1097 * Table of the "misc" grouping of error interrupts. Each entry refers to
1098 * another register containing more information.
1100 static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1101 /* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
1102 /* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
1103 /* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
1104 /* 3*/ { 0, 0, 0, NULL }, /* reserved */
1105 /* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
1106 /* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
1107 /* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1108 /* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
1109 /* the rest are reserved */
/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4
/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
/* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
	/* rest are reserved */
};
/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register cannot be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7
/*
 * Table of the DC grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/	DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/	/* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};
/*
 * Per-counter table entry.
 */
struct cntr_entry {
	char *name;	/* counter name */
	u32 csr;	/* csr to read for name (if applicable) */
	int offset;	/* offset into dd or ppd to store the counter's value */
	u8 flags;	/* per-counter flags */
	/* accessor for stat element, context either dd or ppd */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};
#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ name, csr, offset, flags, accessor }

/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* 64bit RXE */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)
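/*
 * For example (illustrative only), OVR_ELM(3) defines counter "RcvHdrOvr3"
 * reading the CSR at RCV_HDR_OVFL_CNT + 3 * 0x100: one overflow-count CSR
 * per receive context, spaced 0x100 apart.
 */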
/* 32bit TXE */
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

/* 64bit TXE */
#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, flags, \
	  dev_access_u64_csr)

/* CCE */
#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* DC */
#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, counter, 0, flags, dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, counter, 0, flags, dc_access_lcb_cntr)

/* ibp counters */
#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, 0, 0, CNTR_NORMAL, access_ibp_##cntr)
/**
 * hfi1_addr_from_offset - return addr for readq/writeq
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * This routine selects the appropriate base address
 * based on the indicated offset.
 */
static inline void __iomem *hfi1_addr_from_offset(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (offset >= dd->base2_start)
		return dd->kregbase2 + (offset - dd->base2_start);
	return dd->kregbase1 + offset;
}
/**
 * read_csr - read CSR at the indicated offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * Return: the value read or all FF's if there
 * is no valid access
 */
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return readq(hfi1_addr_from_offset(dd, offset));
	return -1;
}
/**
 * write_csr - write CSR at the indicated offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 * @value: value to write
 */
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT) {
		void __iomem *base = hfi1_addr_from_offset(dd, offset);

		/* avoid write to RcvArray */
		if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
			return;
		writeq(value, base);
	}
}
/**
 * get_csr_addr - return the iomem address for offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * Return: The iomem address to use in subsequent
 * writeq/readq operations.
 */
void __iomem *get_csr_addr(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return hfi1_addr_from_offset(dd, offset);
	return NULL;
}
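/*
 * Illustrative usage (SOME_CSR_OFFSET is a hypothetical name): a hot path
 * that polls one CSR can look up the address once and readq it directly:
 *
 *	void __iomem *addr = get_csr_addr(dd, SOME_CSR_OFFSET);
 *	u64 val = addr ? readq(addr) : (u64)-1;
 */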
static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}
	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_SDMA) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 0x100 * vl;
	} else if (vl != CNTR_INVALID_VL) {
		return 0;
	}
	return read_write_csr(dd, csr, mode, data);
}
static u64 access_sde_err_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;
}

static u64 access_sde_int_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].sdma_int_cnt;
	return 0;
}

static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
				   void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].idle_int_cnt;
	return 0;
}

static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
				       void *context, int idx, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].progress_int_cnt;
	return 0;
}
static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 val;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else if (vl != CNTR_INVALID_VL) {
		return 0;
	}

	val = read_write_csr(dd, csr, mode, data);
	return val;
}
static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u32 csr = entry->csr;
	int ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;
	if (mode == CNTR_MODE_R)
		ret = read_lcb_csr(dd, csr, &data);
	else if (mode == CNTR_MODE_W)
		ret = write_lcb_csr(dd, csr, data);

	if (ret) {
		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
	return data;
}
static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
			       int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_csr(ppd->dd, entry->csr, mode, data);
}
static u64 port_access_u64_csr(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;
	u64 val;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else if (vl != CNTR_INVALID_VL) {
		return 0;
	}
	val = read_write_csr(ppd->dd, csr, mode, data);
	return val;
}
/* Software defined */
static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
				u64 data)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = *cntr;
	} else if (mode == CNTR_MODE_W) {
		*cntr = data;
		ret = data;
	} else {
		dd_dev_err(dd, "Invalid cntr sw access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
	return ret;
}
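
/*
 * Example (illustrative only): unlike the per-CPU counters below, a
 * plain software counter accepts any value on write, so
 * read_write_sw(dd, &cntr, CNTR_MODE_W, 0) clears it, while
 * CNTR_MODE_R simply returns the stored value.
 */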
static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
}

static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
}

static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
}
static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
	u64 zero = 0;
	u64 *counter;

	if (vl == CNTR_INVALID_VL)
		counter = &ppd->port_xmit_discards;
	else if (vl >= 0 && vl < C_VL_COUNT)
		counter = &ppd->port_xmit_discards_vl[vl];
	else
		counter = &zero;

	return read_write_sw(ppd->dd, counter, mode, data);
}
static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
			     mode, data);
}

static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
				      void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
			     mode, data);
}
u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}
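
/*
 * Illustrative sketch (assumption, not original code): producers bump a
 * per-CPU counter locklessly on the hot path, and only the rare read
 * path folds all CPUs together with get_all_cpu_total().
 * "example_hot_path_increment" is a hypothetical helper.
 */
static inline void example_hot_path_increment(u64 __percpu *cntr)
{
	this_cpu_inc(*cntr);	/* no lock or atomic needed */
}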
static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
			  u64 __percpu *cntr,
			  int vl, int mode, u64 data)
{
	u64 ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;

	if (mode == CNTR_MODE_R) {
		ret = get_all_cpu_total(cntr) - *z_val;
	} else if (mode == CNTR_MODE_W) {
		/* A write can only zero the counter */
		if (data == 0)
			*z_val = get_all_cpu_total(cntr);
		else
			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
	} else {
		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
		return 0;
	}

	return ret;
}
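
/*
 * Example (illustrative only): "zeroing" a per-CPU counter never touches
 * the per-CPU slots; it snapshots the current total into *z_val, so the
 * next CNTR_MODE_R read reports total - snapshot, i.e. 0 immediately
 * after the write.
 */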
static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
			      mode, data);
}

static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
			      mode, data);
}
static u64 access_sw_pio_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_piowait;
}

static u64 access_sw_pio_drain(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->verbs_dev.n_piodrain;
}

static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->ctx0_seq_drop;
}

static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_txwait;
}

static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_kmem_wait;
}

static u64 access_sw_send_schedule(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
			      mode, data);
}
/* Software counters for the error status bits within MISC_ERR_STATUS */
static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[12];
}

static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[11];
}

static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[10];
}

static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[9];
}

static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[8];
}

static u64 access_misc_efuse_read_bad_addr_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[7];
}

static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[6];
}

static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[5];
}

static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[4];
}

static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[3];
}

static u64 access_misc_csr_write_bad_addr_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[2];
}

static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[1];
}

static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[0];
}
/*
 * Software counter for the aggregate of
 * individual CceErrStatus counters
 */
static u64 access_sw_cce_err_status_aggregated_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_cce_err_status_aggregate;
}
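
/*
 * Illustrative note (assumption): the aggregate above is maintained by
 * the CceErrStatus interrupt path OR-ing each newly observed status
 * value into the running total, roughly
 * dd->sw_cce_err_status_aggregate |= cce_err_status.
 */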
/*
 * Software counters corresponding to each of the
 * error status bits within CceErrStatus
 */
static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[40];
}

static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[39];
}

static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[38];
}

static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[37];
}

static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[36];
}

static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[35];
}

static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[34];
}

static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[33];
}

static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						void *context, int vl, int mode,
						u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[32];
}

static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[31];
}

static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[30];
}

static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[29];
}

static u64 access_pcic_transmit_back_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[28];
}

static u64 access_pcic_transmit_front_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[27];
}

static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[26];
}

static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[25];
}

static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[24];
}

static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[23];
}

static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[22];
}

static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[21];
}

static u64 access_pcic_n_post_dat_q_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[20];
}

static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[19];
}

static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[18];
}

static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[17];
}

static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[16];
}

static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[15];
}

static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[14];
}

static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[13];
}

static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[12];
}

static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[11];
}

static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[10];
}

static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[9];
}

static u64 access_cce_cli2_async_fifo_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[8];
}

static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[7];
}

static u64 access_cce_cli0_async_fifo_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[6];
}

static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[5];
}

static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[4];
}

static u64 access_cce_trgt_async_fifo_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[3];
}

static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[2];
}

static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[1];
}

static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[0];
}
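
/*
 * Illustrative sketch (hypothetical entry, not original code): each of
 * the accessors above is wired into the device counter table further
 * down in this file, in the style of
 *
 *	CNTR_ELEM("CceCsrParityErr", 0, 0, CNTR_NORMAL,
 *		  access_ccs_csr_parity_err_cnt),
 *
 * so counter reads dispatch through the entry's accessor rather than a
 * raw CSR read.
 */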
/*
 * Software counters corresponding to each of the
 * error status bits within RcvErrStatus
 */
static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[63];
}

static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[62];
}

static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[61];
}

static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[60];
}

static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[59];
}

static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[58];
}

static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[57];
}

static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[56];
}

static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[55];
}

static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[54];
}

static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[53];
}

static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[52];
}

static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[51];
}

static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[50];
}

static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[49];
}

static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[48];
}

static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[47];
}

static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[46];
}

static u64 access_rx_hq_intr_csr_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[45];
}

static u64 access_rx_lookup_csr_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[44];
}

static u64 access_rx_lookup_rcv_array_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[43];
}

static u64 access_rx_lookup_rcv_array_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[42];
}

static u64 access_rx_lookup_des_part2_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[41];
}

static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[40];
}

static u64 access_rx_lookup_des_part1_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[39];
}

static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[38];
}

static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[37];
}

static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[36];
}

static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[35];
}

static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[34];
}

static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[33];
}

static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[32];
}

static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[31];
}

static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[30];
}

static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[29];
}

static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[28];
}

static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[27];
}

static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[26];
}

static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[25];
}

static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[24];
}

static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[23];
}

static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[22];
}

static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[21];
}

static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[20];
}

static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[19];
}

static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[18];
}

static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[17];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[16];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[15];
}

static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[14];
}

static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[13];
}

static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[12];
}

static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[11];
}

static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[10];
}

static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[9];
}

static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[8];
}

static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[7];
}

static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[6];
}

static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[5];
}

static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[4];
}

static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[3];
}

static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[2];
}

static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[1];
}

static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendPioErrStatus
 */
static u64 access_pio_pec_sop_head_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[35];
}

static u64 access_pio_pcc_sop_head_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[34];
}

static u64 access_pio_last_returned_cnt_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[33];
}

static u64 access_pio_current_free_cnt_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[32];
}

static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[31];
}

static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[30];
}

static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[29];
}

static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[28];
}

static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[27];
}

static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[26];
}

static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[25];
}

static u64 access_pio_block_qw_count_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[24];
}

static u64 access_pio_write_qw_valid_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[23];
}

static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[22];
}

static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[21];
}

static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[20];
}

static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[19];
}

static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[18];
}

static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[17];
}

static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[16];
}

static u64 access_pio_credit_ret_fifo_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[15];
}

static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[14];
}

static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[13];
}

static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[12];
}

static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[11];
}

static u64 access_pio_sm_pkt_reset_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[10];
}

static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[9];
}

static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[8];
}

static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[7];
}

static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[6];
}

static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[5];
}

static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[4];
}

static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[3];
}

static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[2];
}

static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[1];
}

static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaErrStatus
 */
static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[3];
}

static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[2];
}

static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[1];
}

static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[0];
}
3150 * Software counters corresponding to each of the
3151 * error status bits within SendEgressErrStatus
3153 static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3154 const struct cntr_entry *entry,
3155 void *context, int vl, int mode, u64 data)
3157 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3159 return dd->send_egress_err_status_cnt[63];
3162 static u64 access_tx_read_sdma_memory_csr_err_cnt(
3163 const struct cntr_entry *entry,
3164 void *context, int vl, int mode, u64 data)
3166 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3168 return dd->send_egress_err_status_cnt[62];
3171 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3172 void *context, int vl, int mode,
3175 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3177 return dd->send_egress_err_status_cnt[61];
3180 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3181 void *context, int vl,
3184 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3186 return dd->send_egress_err_status_cnt[60];
3189 static u64 access_tx_read_sdma_memory_cor_err_cnt(
3190 const struct cntr_entry *entry,
3191 void *context, int vl, int mode, u64 data)
3193 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3195 return dd->send_egress_err_status_cnt[59];
3198 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3199 void *context, int vl, int mode,
3202 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3204 return dd->send_egress_err_status_cnt[58];
3207 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3208 void *context, int vl, int mode,
3211 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3213 return dd->send_egress_err_status_cnt[57];
3216 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3217 void *context, int vl, int mode,
3220 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3222 return dd->send_egress_err_status_cnt[56];
3225 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3226 void *context, int vl, int mode,
3229 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3231 return dd->send_egress_err_status_cnt[55];
3234 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3235 void *context, int vl, int mode,
3238 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3240 return dd->send_egress_err_status_cnt[54];
3243 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3244 void *context, int vl, int mode,
3247 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3249 return dd->send_egress_err_status_cnt[53];
3252 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3253 void *context, int vl, int mode,
3256 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3258 return dd->send_egress_err_status_cnt[52];
3261 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3262 void *context, int vl, int mode,
3265 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3267 return dd->send_egress_err_status_cnt[51];
3270 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3271 void *context, int vl, int mode,
3274 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3276 return dd->send_egress_err_status_cnt[50];
3279 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3280 void *context, int vl, int mode,
3283 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3285 return dd->send_egress_err_status_cnt[49];
3288 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3289 void *context, int vl, int mode,
3292 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3294 return dd->send_egress_err_status_cnt[48];
3297 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3298 void *context, int vl, int mode,
3301 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3303 return dd->send_egress_err_status_cnt[47];
3306 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3307 void *context, int vl, int mode,
3310 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3312 return dd->send_egress_err_status_cnt[46];
3315 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3316 void *context, int vl, int mode,
3319 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3321 return dd->send_egress_err_status_cnt[45];
3324 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3325 void *context, int vl,
3328 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3330 return dd->send_egress_err_status_cnt[44];
3333 static u64 access_tx_read_sdma_memory_unc_err_cnt(
3334 const struct cntr_entry *entry,
3335 void *context, int vl, int mode, u64 data)
3337 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3339 return dd->send_egress_err_status_cnt[43];
3342 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3343 void *context, int vl, int mode,
3346 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3348 return dd->send_egress_err_status_cnt[42];
3351 static u64 access_tx_credit_return_partiy_err_cnt(
3352 const struct cntr_entry *entry,
3353 void *context, int vl, int mode, u64 data)
3355 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3357 return dd->send_egress_err_status_cnt[41];
3360 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3361 const struct cntr_entry *entry,
3362 void *context, int vl, int mode, u64 data)
3364 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3366 return dd->send_egress_err_status_cnt[40];
3369 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3370 const struct cntr_entry *entry,
3371 void *context, int vl, int mode, u64 data)
3373 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3375 return dd->send_egress_err_status_cnt[39];
3378 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3379 const struct cntr_entry *entry,
3380 void *context, int vl, int mode, u64 data)
3382 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3384 return dd->send_egress_err_status_cnt[38];
3387 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3388 const struct cntr_entry *entry,
3389 void *context, int vl, int mode, u64 data)
3391 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3393 return dd->send_egress_err_status_cnt[37];
3396 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3397 const struct cntr_entry *entry,
3398 void *context, int vl, int mode, u64 data)
3400 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3402 return dd->send_egress_err_status_cnt[36];
3405 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3406 const struct cntr_entry *entry,
3407 void *context, int vl, int mode, u64 data)
3409 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3411 return dd->send_egress_err_status_cnt[35];
3414 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3415 const struct cntr_entry *entry,
3416 void *context, int vl, int mode, u64 data)
3418 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3420 return dd->send_egress_err_status_cnt[34];
3423 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3424 const struct cntr_entry *entry,
3425 void *context, int vl, int mode, u64 data)
3427 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3429 return dd->send_egress_err_status_cnt[33];
3432 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3433 const struct cntr_entry *entry,
3434 void *context, int vl, int mode, u64 data)
3436 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3438 return dd->send_egress_err_status_cnt[32];
3441 static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3442 const struct cntr_entry *entry,
3443 void *context, int vl, int mode, u64 data)
3445 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3447 return dd->send_egress_err_status_cnt[31];
3450 static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3451 const struct cntr_entry *entry,
3452 void *context, int vl, int mode, u64 data)
3454 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3456 return dd->send_egress_err_status_cnt[30];
3459 static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3460 const struct cntr_entry *entry,
3461 void *context, int vl, int mode, u64 data)
3463 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3465 return dd->send_egress_err_status_cnt[29];
3468 static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3469 const struct cntr_entry *entry,
3470 void *context, int vl, int mode, u64 data)
3472 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3474 return dd->send_egress_err_status_cnt[28];
3477 static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3478 const struct cntr_entry *entry,
3479 void *context, int vl, int mode, u64 data)
3481 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3483 return dd->send_egress_err_status_cnt[27];
3486 static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3487 const struct cntr_entry *entry,
3488 void *context, int vl, int mode, u64 data)
3490 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3492 return dd->send_egress_err_status_cnt[26];
3495 static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3496 const struct cntr_entry *entry,
3497 void *context, int vl, int mode, u64 data)
3499 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3501 return dd->send_egress_err_status_cnt[25];
3504 static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3505 const struct cntr_entry *entry,
3506 void *context, int vl, int mode, u64 data)
3508 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3510 return dd->send_egress_err_status_cnt[24];
3513 static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3514 const struct cntr_entry *entry,
3515 void *context, int vl, int mode, u64 data)
3517 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3519 return dd->send_egress_err_status_cnt[23];
3522 static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3523 const struct cntr_entry *entry,
3524 void *context, int vl, int mode, u64 data)
3526 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3528 return dd->send_egress_err_status_cnt[22];
3531 static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3532 const struct cntr_entry *entry,
3533 void *context, int vl, int mode, u64 data)
3535 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3537 return dd->send_egress_err_status_cnt[21];
3540 static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3541 const struct cntr_entry *entry,
3542 void *context, int vl, int mode, u64 data)
3544 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3546 return dd->send_egress_err_status_cnt[20];
3549 static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3550 const struct cntr_entry *entry,
3551 void *context, int vl, int mode, u64 data)
3553 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3555 return dd->send_egress_err_status_cnt[19];
3558 static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3559 const struct cntr_entry *entry,
3560 void *context, int vl, int mode, u64 data)
3562 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3564 return dd->send_egress_err_status_cnt[18];
3567 static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3568 const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[17];
}

static u64 access_tx_sdma0_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[16];
}

static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[15];
}

static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[14];
}

static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[13];
}

static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[12];
}

static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[11];
}

static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[10];
}

static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[9];
}

static u64 access_tx_sdma_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[8];
}

static u64 access_tx_pio_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[7];
}

static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[6];
}

static u64 access_tx_incorrect_link_state_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[5];
}

static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[4];
}

static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[3];
}

static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[2];
}

static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[1];
}

static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[0];
}
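/*
 * Note: the accessors above only report the software shadow counts in
 * dd->send_egress_err_status_cnt[]; the array itself is presumably
 * bumped by the egress error handling elsewhere in this file as
 * SendEgressErrStatus bits are observed.
 */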
/*
 * Software counters corresponding to each of the
 * error status bits within SendErrStatus
 */
static u64 access_send_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[2];
}

static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[1];
}

static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendCtxtErrStatus
 */
static u64 access_pio_write_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[4];
}

static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[3];
}

static u64 access_pio_write_crosses_boundary_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[2];
}

static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[1];
}

static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaEngErrStatus
 */
static u64 access_sdma_header_request_fifo_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[23];
}

static u64 access_sdma_header_storage_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[22];
}

static u64 access_sdma_packet_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[21];
}

static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[20];
}

static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[19];
}

static u64 access_sdma_header_request_fifo_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[18];
}

static u64 access_sdma_header_storage_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[17];
}

static u64 access_sdma_packet_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[16];
}

static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[15];
}

static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[14];
}

static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[13];
}

static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[12];
}

static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[11];
}

static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[10];
}

static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[9];
}

static u64 access_sdma_packet_desc_overflow_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[8];
}

static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl,
					       int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[7];
}

static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
				    void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[6];
}

static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[5];
}

static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[4];
}

static u64 access_sdma_tail_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[3];
}

static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[2];
}

static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[1];
}

static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[0];
}
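/*
 * DcRecvErr is read from a hardware CSR, but bypass packet errors are
 * detected in software.  The accessor below folds the software count
 * into the value returned on a read (saturating at CNTR_MAX) and
 * clears the software count on a write.
 */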
static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
				 void *context, int vl, int mode,
				 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	u64 val = 0;
	u64 csr = entry->csr;

	val = read_write_csr(dd, csr, mode, data);
	if (mode == CNTR_MODE_R) {
		val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
			CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
	} else if (mode == CNTR_MODE_W) {
		dd->sw_rcv_bypass_packet_errors = 0;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}
	return val;
}
#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
				void *context, int vl, int mode, u64 data) \
{ \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
			      ppd->ibport_data.rvp.cntr, vl, \
			      mode, data); \
}
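/*
 * For reference, def_access_sw_cpu(rc_acks) below expands to roughly:
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd,
 *				      &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */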
def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);
#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
			     void *context, int vl, int mode, u64 data) \
{ \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
\
	if (vl != CNTR_INVALID_VL) \
		return 0; \
\
	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
			     mode, data); \
}
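/*
 * Like def_access_sw_cpu() above, but for plain software counters:
 * e.g. def_access_ibp_counter(loop_pkts) generates
 * access_ibp_loop_pkts(), which rejects per-VL queries (the IBP
 * counters are not kept per VL) and forwards to read_write_sw() on
 * ppd->ibport_data.rvp.n_loop_pkts.
 */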
def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);
def_access_ibp_counter(rc_crwaits);
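/*
 * Device counter table, indexed by the C_* device counter enum.  Each
 * CNTR_ELEM() (or wrapper macro around it) supplies a display name, a
 * CSR address, an offset, CNTR_* mode flags, and the access function
 * used to read or write the counter.
 */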
static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
[C_RX_SHORT_ERR] = RXE32_DEV_CNTR_ELEM(RxShrErr, RCV_SHORT_ERR_CNT, CNTR_SYNTH),
[C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
[C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
			RCV_TID_FLOW_GEN_MISMATCH_CNT,
			CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
			CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
			CNTR_NORMAL),
[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
			CNTR_NORMAL),
[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
			CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
			access_dc_rcv_err_cnt),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
			CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
			CNTR_SYNTH),
[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
			CNTR_SYNTH),
[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
			DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
			DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
			DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
			CNTR_SYNTH),
[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_TOTAL_CRC] =
	DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
			 CNTR_SYNTH),
[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
				  CNTR_SYNTH),
[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
				  CNTR_SYNTH),
[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
				  CNTR_SYNTH),
[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
				  CNTR_SYNTH),
[C_DC_CRC_MULT_LN] =
	DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
			 CNTR_SYNTH),
[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
				    CNTR_SYNTH),
[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
				    CNTR_SYNTH),
[C_DC_SEQ_CRC_CNT] =
	DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_ONLY_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS1_CNT] =
	DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS2_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
			 CNTR_SYNTH),
[C_DC_REINIT_FROM_PEER_CNT] =
	DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
			 CNTR_SYNTH),
[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
				  CNTR_SYNTH),
[C_DC_MISC_FLG_CNT] =
	DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_GOOD_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
[C_DC_PRF_ACCEPTED_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_RX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_TX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_CLK_CNTR] =
	DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
	DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
	DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
			 CNTR_SYNTH),
[C_DC_PG_STS_TX_SBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
[C_DC_PG_STS_TX_MBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
			 CNTR_SYNTH),
[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_intr),
[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
			       access_sw_cpu_rcv_limit),
[C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL,
				 access_sw_ctx0_seq_drop),
[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
			    access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
			    access_sw_pio_wait),
[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
			     access_sw_pio_drain),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
			     access_sw_kmem_wait),
[C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL,
			    hfi1_access_sw_tid_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
			      access_sw_send_schedule),
[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
				      SEND_DMA_DESC_FETCHED_CNT, 0,
				      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
				      dev_access_u32_csr),
[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
			     CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			     access_sde_int_cnt),
[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
			     CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			     access_sde_err_cnt),
[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
				  CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
				  access_sde_idle_int_cnt),
[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
				      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
				      access_sde_progress_int_cnt),
/* MISC_ERR_STATUS */
[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_pll_lock_fail_err_cnt),
[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_mbist_fail_err_cnt),
[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_invalid_eep_cmd_err_cnt),
[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_done_parity_err_cnt),
[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_write_err_cnt),
[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
				0, CNTR_NORMAL,
				access_misc_efuse_read_bad_addr_err_cnt),
[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_csr_parity_err_cnt),
[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_fw_auth_failed_err_cnt),
[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_key_mismatch_err_cnt),
[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_sbus_write_failed_err_cnt),
[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_write_bad_addr_err_cnt),
[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_read_bad_addr_err_cnt),
[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_parity_err_cnt),
/* CceErrStatus */
[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
				CNTR_NORMAL,
				access_sw_cce_err_status_aggregated_cnt),
[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_csr_parity_err_cnt),
[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
				CNTR_NORMAL,
				access_cce_int_map_unc_err_cnt),
[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
				CNTR_NORMAL,
				access_cce_int_map_cor_err_cnt),
[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_table_unc_err_cnt),
[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_table_cor_err_cnt),
[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_rxdma_conv_fifo_parity_err_cnt),
[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_rcpl_async_fifo_parity_err_cnt),
[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_seg_write_bad_addr_err_cnt),
[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_seg_read_bad_addr_err_cnt),
[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
				CNTR_NORMAL,
				access_la_triggered_cnt),
[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
				CNTR_NORMAL,
				access_cce_trgt_cpl_timeout_err_cnt),
[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_receive_parity_err_cnt),
[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_transmit_back_parity_err_cnt),
[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
				0, CNTR_NORMAL,
				access_pcic_transmit_front_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_dat_q_unc_err_cnt),
[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_hd_q_unc_err_cnt),
[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_dat_q_unc_err_cnt),
[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_hd_q_unc_err_cnt),
[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_sot_mem_unc_err_cnt),
[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_mem_unc_err),
[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_n_post_dat_q_parity_err_cnt),
[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_n_post_h_q_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_dat_q_cor_err_cnt),
[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_hd_q_cor_err_cnt),
[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_dat_q_cor_err_cnt),
[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_hd_q_cor_err_cnt),
[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_sot_mem_cor_err_cnt),
[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_mem_cor_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoDbgParityError", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_dbg_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoRxdmaParityError", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_rxdma_parity_err_cnt
				),
[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_cli2_async_fifo_parity_err_cnt),
[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_cfg_bus_parity_err_cnt),
[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_cli0_async_fifo_parity_err_cnt),
[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_rspd_data_parity_err_cnt),
[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
				CNTR_NORMAL,
				access_cce_trgt_access_err_cnt),
[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_trgt_async_fifo_parity_err_cnt),
[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_write_bad_addr_err_cnt),
[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_read_bad_addr_err_cnt),
[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_ccs_csr_parity_err_cnt),
/* RcvErrStatus */
[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_parity_err_cnt),
[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_write_bad_addr_err_cnt),
[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_read_bad_addr_err_cnt),
[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_unc_err_cnt),
[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_dq_fsm_encoding_err_cnt),
[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_eq_fsm_encoding_err_cnt),
[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_parity_err_cnt),
[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_data_cor_err_cnt),
[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_data_unc_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_data_fifo_rd_cor_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_data_fifo_rd_unc_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_hdr_fifo_rd_cor_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_hdr_fifo_rd_unc_err_cnt),
[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part2_cor_err_cnt),
[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part2_unc_err_cnt),
[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part1_cor_err_cnt),
[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part1_unc_err_cnt),
[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
				CNTR_NORMAL,
				access_rx_hq_intr_fsm_err_cnt),
[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_hq_intr_csr_parity_err_cnt),
[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_csr_parity_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_rcv_array_cor_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_rcv_array_unc_err_cnt),
[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_lookup_des_part2_parity_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
				0, CNTR_NORMAL,
				access_rx_lookup_des_part1_unc_cor_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_des_part1_unc_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_next_free_buf_cor_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_next_free_buf_unc_err_cnt),
[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
				"RxRbufFlInitWrAddrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_fl_init_wr_addr_parity_err_cnt),
[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_fl_initdone_parity_err_cnt),
[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_fl_write_addr_parity_err_cnt),
[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_fl_rd_addr_parity_err_cnt),
[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_empty_err_cnt),
[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_full_err_cnt),
[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_bad_lookup_err_cnt),
[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_ctx_id_parity_err_cnt),
[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_csr_qeopdw_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
				"RxRbufCsrQNumOfPktParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
				"RxRbufCsrQTlPtrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
				0, 0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
				"RxRbufCsrQHeadBufNumParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_block_list_read_cor_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_block_list_read_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
				"RxRbufLookupDesRegUncCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_reg_unc_err_cnt),
[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_free_list_cor_err_cnt),
[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_free_list_unc_err_cnt),
[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_fsm_encoding_err_cnt),
[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_flag_cor_err_cnt),
[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_flag_unc_err_cnt),
[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dc_sop_eop_parity_err_cnt),
[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_csr_parity_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_qp_map_table_cor_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_qp_map_table_unc_err_cnt),
[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_data_cor_err_cnt),
[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_data_unc_err_cnt),
[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_hdr_cor_err_cnt),
[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_hdr_unc_err_cnt),
[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dc_intf_parity_err_cnt),
[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_cor_err_cnt),
/* SendPioErrStatus */
[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pec_sop_head_parity_err_cnt),
[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pcc_sop_head_parity_err_cnt),
[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
				0, 0, CNTR_NORMAL,
				access_pio_last_returned_cnt_parity_err_cnt),
[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
				0, CNTR_NORMAL,
				access_pio_current_free_cnt_parity_err_cnt),
[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
				CNTR_NORMAL,
				access_pio_reserved_31_err_cnt),
[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
				CNTR_NORMAL,
				access_pio_reserved_30_err_cnt),
[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
				CNTR_NORMAL,
				access_pio_ppmc_sop_len_err_cnt),
[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_ppmc_bqc_mem_parity_err_cnt),
[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_vl_fifo_parity_err_cnt),
[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_vlf_sop_parity_err_cnt),
[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_vlf_v1_len_parity_err_cnt),
[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_block_qw_count_parity_err_cnt),
[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_qw_valid_parity_err_cnt),
[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
				CNTR_NORMAL,
				access_pio_state_machine_err_cnt),
[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_data_parity_err_cnt),
[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pio_host_addr_mem_cor_err_cnt),
[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pio_host_addr_mem_unc_err_cnt),
[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
				CNTR_NORMAL,
				access_pio_init_sm_in_err_cnt),
[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
				CNTR_NORMAL,
				access_pio_ppmc_pbl_fifo_err_cnt),
[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_pio_credit_ret_fifo_parity_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank1_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank0_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank1_unc_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank0_unc_err_cnt),
[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_sm_pkt_reset_parity_err_cnt),
[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pkt_evict_fifo_parity_err_cnt),
[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
				"PioSbrdctrlCrrelFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_sbrdctl_crrel_parity_err_cnt),
[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pec_fifo_parity_err_cnt),
[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pcc_fifo_parity_err_cnt),
[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
				CNTR_NORMAL,
				access_pio_sb_mem_fifo1_err_cnt),
[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
				CNTR_NORMAL,
				access_pio_sb_mem_fifo0_err_cnt),
[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_csr_parity_err_cnt),
[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_addr_parity_err_cnt),
[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_bad_ctxt_err_cnt),
/* SendDmaErrStatus */
[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
				0, CNTR_NORMAL,
				access_sdma_pcie_req_tracking_cor_err_cnt),
[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
				0, CNTR_NORMAL,
				access_sdma_pcie_req_tracking_unc_err_cnt),
[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_csr_parity_err_cnt),
[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_rpy_tag_err_cnt),
/* SendEgressErrStatus */
[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_pio_memory_csr_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
				0, CNTR_NORMAL,
				access_tx_read_sdma_memory_csr_err_cnt),
[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_egress_fifo_cor_err_cnt),
[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_pio_memory_cor_err_cnt),
[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_sdma_memory_cor_err_cnt),
[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sb_hdr_cor_err_cnt),
[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
				CNTR_NORMAL,
				access_tx_credit_overrun_err_cnt),
[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo8_cor_err_cnt),
[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo7_cor_err_cnt),
[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo6_cor_err_cnt),
[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo5_cor_err_cnt),
[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo4_cor_err_cnt),
[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo3_cor_err_cnt),
[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo2_cor_err_cnt),
[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo1_cor_err_cnt),
[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo0_cor_err_cnt),
[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
				CNTR_NORMAL,
				access_tx_credit_return_vl_err_cnt),
[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
				CNTR_NORMAL,
				access_tx_hcrc_insertion_err_cnt),
[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_egress_fifo_unc_err_cnt),
[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_pio_memory_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_sdma_memory_unc_err_cnt),
[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sb_hdr_unc_err_cnt),
[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_credit_return_partiy_err_cnt),
[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo8_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo7_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo6_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo5_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo4_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo3_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo2_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo1_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo0_unc_or_parity_err_cnt),
[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma15_disallowed_packet_err_cnt),
[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma14_disallowed_packet_err_cnt),
[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma13_disallowed_packet_err_cnt),
[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma12_disallowed_packet_err_cnt),
[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma11_disallowed_packet_err_cnt),
[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma10_disallowed_packet_err_cnt),
[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma9_disallowed_packet_err_cnt),
[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma8_disallowed_packet_err_cnt),
[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma7_disallowed_packet_err_cnt),
[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma6_disallowed_packet_err_cnt),
[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma5_disallowed_packet_err_cnt),
[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma4_disallowed_packet_err_cnt),
[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma3_disallowed_packet_err_cnt),
[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma2_disallowed_packet_err_cnt),
[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma1_disallowed_packet_err_cnt),
[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma0_disallowed_packet_err_cnt),
[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_config_parity_err_cnt),
[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sbrd_ctl_csr_parity_err_cnt),
[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_csr_parity_err_cnt),
[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
				CNTR_NORMAL,
				access_tx_illegal_vl_err_cnt),
[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
				"TxSbrdCtlStateMachineParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sbrd_ctl_state_machine_parity_err_cnt),
[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_10_err_cnt),
[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_9_err_cnt),
[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma_launch_intf_parity_err_cnt),
[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_pio_launch_intf_parity_err_cnt),
[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_6_err_cnt),
[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
				CNTR_NORMAL,
				access_tx_incorrect_link_state_err_cnt),
[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
				CNTR_NORMAL,
				access_tx_linkdown_err_cnt),
[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
				"EgressFifoUnderrunOrParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_egress_fifi_underrun_or_parity_err_cnt),
[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_2_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_pkt_integrity_mem_unc_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_pkt_integrity_mem_cor_err_cnt),
/* SendErrStatus */
[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_send_csr_write_bad_addr_err_cnt),
[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_send_csr_read_bad_addr_err_cnt),
[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_send_csr_parity_cnt),
/* SendCtxtErrStatus */
[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_out_of_bounds_err_cnt),
[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_overflow_err_cnt),
[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
				0, 0, CNTR_NORMAL,
				access_pio_write_crosses_boundary_err_cnt),
[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
				CNTR_NORMAL,
				access_pio_disallowed_packet_err_cnt),
[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
				CNTR_NORMAL,
				access_pio_inconsistent_sop_err_cnt),
/* SendDmaEngErrStatus */
[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
				0, 0, CNTR_NORMAL,
				access_sdma_header_request_fifo_cor_err_cnt),
[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_storage_cor_err_cnt),
[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_packet_tracking_cor_err_cnt),
[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_assembly_cor_err_cnt),
[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_desc_table_cor_err_cnt),
[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
				0, 0, CNTR_NORMAL,
				access_sdma_header_request_fifo_unc_err_cnt),
[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_storage_unc_err_cnt),
[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_packet_tracking_unc_err_cnt),
[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_assembly_unc_err_cnt),
[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_desc_table_unc_err_cnt),
[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_timeout_err_cnt),
[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_length_err_cnt),
[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_address_err_cnt),
[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_select_err_cnt),
[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
				CNTR_NORMAL,
				access_sdma_reserved_9_err_cnt),
[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_packet_desc_overflow_err_cnt),
[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_length_mismatch_err_cnt),
[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_halt_err_cnt),
[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_mem_read_err_cnt),
[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_first_desc_err_cnt),
[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_tail_out_of_bounds_err_cnt),
[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_too_long_err_cnt),
[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_gen_mismatch_err_cnt),
[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_wrong_dw_err_cnt),
};
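/*
 * Per-port counter table, indexed by the port counter enum.  Entries
 * use the same CNTR_ELEM() layout as dev_cntrs[] above, but the
 * access functions here take a struct hfi1_pportdata as the context
 * (see def_access_sw_cpu() and def_access_ibp_counter()).
 */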
static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
			CNTR_NORMAL),
[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
			CNTR_NORMAL),
[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
			CNTR_NORMAL),
[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
			CNTR_NORMAL),
[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
			CNTR_NORMAL),
[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
				      CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
				     CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
				      CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			     access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			   access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
				 access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			     access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
				CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
				access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
				 access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
				access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_IBP_RC_CRWAITS] = SW_IBP_CNTR(RcCrWait, rc_crwaits),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
			       access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
				access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
				       access_sw_cpu_rc_delayed_comp),
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
};
/* ======================================================================== */

/* return true if this is chip revision A */
int is_ax(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xf0) == 0;
}

/* return true if this is chip revision B */
int is_bx(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xF0) == 0x10;
}

/* return true if the kernel urgent interrupt is masked for rcd */
bool is_urg_masked(struct hfi1_ctxtdata *rcd)
{
	u64 mask;
	u32 is = IS_RCVURGENT_START + rcd->ctxt;
	u8 bit = is % 64;

	mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
	return !(mask & BIT_ULL(bit));
}
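
/*
 * Worked example (illustrative numbers, not taken from the register
 * spec): if IS_RCVURGENT_START were 192, then for receive context 3 the
 * interrupt source is 192 + 3 = 195.  That source lives in CCE_INT_MASK
 * register 195 / 64 = 3 - each mask CSR covers 64 sources and the CSRs
 * are 8 bytes apart, hence the "8 * (is / 64)" offset - at bit
 * 195 % 64 = 3.
 */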
/*
 * Append string s to buffer buf.  Arguments curp and len are the current
 * position and remaining length, respectively.
 *
 * return 0 on success, 1 on out of room
 */
static int append_str(char *buf, char **curp, int *lenp, const char *s)
{
	char *p = *curp;
	int len = *lenp;
	int result = 0; /* success */
	char c;

	/* add a comma, if not first in the buffer */
	if (p != buf) {
		if (len == 0) {
			result = 1; /* out of room */
			goto done;
		}
		*p++ = ',';
		len--;
	}

	/* copy the string */
	while ((c = *s++) != 0) {
		if (len == 0) {
			result = 1; /* out of room */
			goto done;
		}
		*p++ = c;
		len--;
	}

done:
	/* write return values */
	*curp = p;
	*lenp = len;

	return result;
}
/*
 * Using the given flag table, print a comma separated string into
 * the buffer.  End in '*' if the buffer is too short.
 */
static char *flag_string(char *buf, int buf_len, u64 flags,
			 struct flag_table *table, int table_size)
{
	char extra[32];
	char *p = buf;
	int len = buf_len;
	int no_room = 0;
	int i;

	/* make sure there is at least 2 so we can form "*" */
	if (len < 2)
		return "";

	len--;	/* leave room for a nul */
	for (i = 0; i < table_size; i++) {
		if (flags & table[i].flag) {
			no_room = append_str(buf, &p, &len, table[i].str);
			if (no_room)
				break;
			flags &= ~table[i].flag;
		}
	}

	/* any undocumented bits left? */
	if (!no_room && flags) {
		snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
		no_room = append_str(buf, &p, &len, extra);
	}

	/* add * if ran out of room */
	if (no_room) {
		/* may need to back up to add space for a '*' */
		if (len == 0)
			--p;
		*p++ = '*';
	}

	/* add final nul - space already allocated above */
	*p = 0;
	return buf;
}
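
/*
 * Example (illustrative, not an actual table in this file): with a table
 * { FLAG_ENTRY0("LinkErr", 0x1), FLAG_ENTRY0("FifoErr", 0x2) } and
 * flags == 0x7, flag_string() yields "LinkErr,FifoErr,bits 0x4" - the
 * named bits first, then the undocumented remainder reported
 * numerically.  If the buffer runs out part way, the output instead
 * ends in '*'.
 */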
/* first 8 CCE error interrupt source names */
static const char * const cce_misc_names[] = {
	"CceErrInt",	/* 0 */
	"RxeErrInt",	/* 1 */
	"MiscErrInt",	/* 2 */
	"Reserved3",	/* 3 */
	"PioErrInt",	/* 4 */
	"SDmaErrInt",	/* 5 */
	"EgressErrInt",	/* 6 */
	"TxeErrInt"	/* 7 */
};
/*
 * Return the miscellaneous error interrupt name.
 */
static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
{
	if (source < ARRAY_SIZE(cce_misc_names))
		strncpy(buf, cce_misc_names[source], bsize);
	else
		snprintf(buf, bsize, "Reserved%u",
			 source + IS_GENERAL_ERR_START);

	return buf;
}

/*
 * Return the SDMA engine error interrupt name.
 */
static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SDmaEngErrInt%u", source);
	return buf;
}

/*
 * Return the send context error interrupt name.
 */
static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SendCtxtErrInt%u", source);
	return buf;
}
static const char * const various_names[] = {
	"PbcInt",
	"GpioAssertInt",
	"Qsfp1Int",
	"Qsfp2Int",
	"TCritInt"
};

/*
 * Return the various interrupt name.
 */
static char *is_various_name(char *buf, size_t bsize, unsigned int source)
{
	if (source < ARRAY_SIZE(various_names))
		strncpy(buf, various_names[source], bsize);
	else
		snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
	return buf;
}

/*
 * Return the DC interrupt name.
 */
static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
{
	static const char * const dc_int_names[] = {
		"common",
		"lcb",
		"8051",
		"lbm"	/* local block merge */
	};

	if (source < ARRAY_SIZE(dc_int_names))
		snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
	else
		snprintf(buf, bsize, "DCInt%u", source);
	return buf;
}
static const char * const sdma_int_names[] = {
	"SDmaInt",
	"SdmaIdleInt",
	"SdmaProgressInt",
};

/*
 * Return the SDMA engine interrupt name.
 */
static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
{
	/* what interrupt */
	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
	/* which engine */
	unsigned int which = source % TXE_NUM_SDMA_ENGINES;

	if (likely(what < 3))
		snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
	else
		snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
	return buf;
}

/*
 * Return the receive available interrupt name.
 */
static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "RcvAvailInt%u", source);
	return buf;
}

/*
 * Return the receive urgent interrupt name.
 */
static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "RcvUrgentInt%u", source);
	return buf;
}

/*
 * Return the send credit interrupt name.
 */
static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SendCreditInt%u", source);
	return buf;
}

/*
 * Return the reserved interrupt name.
 */
static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
	return buf;
}
static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   cce_err_status_flags,
			   ARRAY_SIZE(cce_err_status_flags));
}

static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   rxe_err_status_flags,
			   ARRAY_SIZE(rxe_err_status_flags));
}

static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, misc_err_status_flags,
			   ARRAY_SIZE(misc_err_status_flags));
}

static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   pio_err_status_flags,
			   ARRAY_SIZE(pio_err_status_flags));
}

static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   sdma_err_status_flags,
			   ARRAY_SIZE(sdma_err_status_flags));
}

static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   egress_err_status_flags,
			   ARRAY_SIZE(egress_err_status_flags));
}

static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   egress_err_info_flags,
			   ARRAY_SIZE(egress_err_info_flags));
}

static char *send_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   send_err_status_flags,
			   ARRAY_SIZE(send_err_status_flags));
}
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	/*
	 * For most these errors, there is nothing that can be done except
	 * report or record it.
	 */
	dd_dev_info(dd, "CCE Error: %s\n",
		    cce_err_status_string(buf, sizeof(buf), reg));

	if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
	    is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
		/* this error requires a manual drop into SPC freeze mode */
		/* then a fix up */
		start_freeze_handling(dd->pport, FREEZE_SELF);
	}

	for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i)) {
			incr_cntr64(&dd->cce_err_status_cnt[i]);
			/* maintain a counter over all cce_err_status errors */
			incr_cntr64(&dd->sw_cce_err_status_aggregate);
		}
	}
}
/*
 * Check counters for receive errors that do not have an interrupt
 * associated with them.
 */
#define RCVERR_CHECK_TIME 10
static void update_rcverr_timer(struct timer_list *t)
{
	struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
	struct hfi1_pportdata *ppd = dd->pport;
	u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);

	if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
	    ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
		set_link_down_reason(
			ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
			OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
		queue_work(ppd->link_wq, &ppd->link_bounce_work);
	}
	dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;

	mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
}

static int init_rcverr(struct hfi1_devdata *dd)
{
	timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
	/* Assume the hardware counter has been reset */
	dd->rcv_ovfl_cnt = 0;
	return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
}
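
/*
 * Timing sketch: with RCVERR_CHECK_TIME of 10, the first check fires
 * HZ * 10 jiffies (10 seconds) after init_rcverr(), and each run of
 * update_rcverr_timer() re-arms itself for another interval, so the
 * overflow counter is sampled on a steady 10 second cadence for the
 * life of the device.
 */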
static void free_rcverr(struct hfi1_devdata *dd)
{
	if (dd->rcverr_timer.function)
		del_timer_sync(&dd->rcverr_timer);
}
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Receive Error: %s\n",
		    rxe_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_RXE_FREEZE_ERR) {
		int flags = 0;

		/*
		 * Freeze mode recovery is disabled for the errors
		 * in RXE_FREEZE_ABORT_MASK
		 */
		if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
			flags = FREEZE_ABORT;

		start_freeze_handling(dd->pport, flags);
	}

	for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->rcv_err_status_cnt[i]);
	}
}
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Misc Error: %s\n",
		    misc_err_status_string(buf, sizeof(buf), reg));
	for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->misc_err_status_cnt[i]);
	}
}
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "PIO Error: %s\n",
		    pio_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_PIO_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);

	for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_pio_err_status_cnt[i]);
	}
}

static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "SDMA Error: %s\n",
		    sdma_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_SDMA_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);

	for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_dma_err_status_cnt[i]);
	}
}
static inline void __count_port_discards(struct hfi1_pportdata *ppd)
{
	incr_cntr64(&ppd->port_xmit_discards);
}

static void count_port_inactive(struct hfi1_devdata *dd)
{
	__count_port_discards(dd->pport);
}
/*
 * We have had a "disallowed packet" error during egress. Determine the
 * integrity check which failed, and update relevant error counter, etc.
 *
 * Note that the SEND_EGRESS_ERR_INFO register has only a single
 * bit of state per integrity check, and so we can miss the reason for an
 * egress error if more than one packet fails the same integrity check
 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
 */
static void handle_send_egress_err_info(struct hfi1_devdata *dd,
					int vl)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
	u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
	char buf[96];

	/* clear down all observed info as quickly as possible after read */
	write_csr(dd, SEND_EGRESS_ERR_INFO, info);

	dd_dev_info(dd,
		    "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
		    info, egress_err_info_string(buf, sizeof(buf), info), src);

	/* Eventually add other counters for each bit */
	if (info & PORT_DISCARD_EGRESS_ERRS) {
		int weight, i;

		/*
		 * Count all applicable bits as individual errors and
		 * attribute them to the packet that triggered this handler.
		 * This may not be completely accurate due to limitations
		 * on the available hardware error information.  There is
		 * a single information register and any number of error
		 * packets may have occurred and contributed to it before
		 * this routine is called.  This means that:
		 * a) If multiple packets with the same error occur before
		 *    this routine is called, earlier packets are missed.
		 *    There is only a single bit for each error type.
		 * b) Errors may not be attributed to the correct VL.
		 *    The driver is attributing all bits in the info register
		 *    to the packet that triggered this call, but bits
		 *    could be an accumulation of different packets with
		 *    different VLs.
		 * c) A single error packet may have multiple counts attached
		 *    to it.  There is no way for the driver to know if
		 *    multiple bits set in the info register are due to a
		 *    single packet or multiple packets.  The driver assumes
		 *    multiple packets.
		 */
		weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
		for (i = 0; i < weight; i++) {
			__count_port_discards(ppd);
			if (vl >= 0 && vl < TXE_NUM_DATA_VL)
				incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
			else if (vl == 15)
				incr_cntr64(&ppd->port_xmit_discards_vl
					    [C_VL_15]);
		}
	}
}
/*
 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
 * register. Does it represent a 'port inactive' error?
 */
static inline int port_inactive_err(u64 posn)
{
	return (posn >= SEES(TX_LINKDOWN) &&
		posn <= SEES(TX_INCORRECT_LINK_STATE));
}

/*
 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
 * register. Does it represent a 'disallowed packet' error?
 */
static inline int disallowed_pkt_err(int posn)
{
	return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
		posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
}

/*
 * Input value is a bit position of one of the SDMA engine disallowed
 * packet errors.  Return which engine.  Use of this must be guarded by
 * disallowed_pkt_err().
 */
static inline int disallowed_pkt_engine(int posn)
{
	return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
}
/*
 * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
 * be done.
 */
static int engine_to_vl(struct hfi1_devdata *dd, int engine)
{
	struct sdma_vl_map *m;
	int vl;

	/* range check */
	if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
		return -1;

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	vl = m->engine_to_vl[engine];
	rcu_read_unlock();

	return vl;
}
/*
 * Translate the send context (software index) into a VL.  Return -1 if the
 * translation cannot be done.
 */
static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
{
	struct send_context_info *sci;
	struct send_context *sc;
	int i;

	sci = &dd->send_contexts[sw_index];

	/* there is no information for user (PSM) and ack contexts */
	if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
		return -1;

	sc = sci->sc;
	if (!sc)
		return -1;
	if (dd->vld[15].sc == sc)
		return 15;
	for (i = 0; i < num_vls; i++)
		if (dd->vld[i].sc == sc)
			return i;

	return -1;
}
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	u64 reg_copy = reg, handled = 0;
	char buf[96];
	int i = 0;

	if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);
	else if (is_ax(dd) &&
		 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
		 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
		start_freeze_handling(dd->pport, 0);

	while (reg_copy) {
		int posn = fls64(reg_copy);
		/* fls64() returns a 1-based offset, we want it zero based */
		int shift = posn - 1;
		u64 mask = 1ULL << shift;

		if (port_inactive_err(shift)) {
			count_port_inactive(dd);
			handled |= mask;
		} else if (disallowed_pkt_err(shift)) {
			int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));

			handle_send_egress_err_info(dd, vl);
			handled |= mask;
		}
		reg_copy &= ~mask;
	}

	reg &= ~handled;

	if (reg)
		dd_dev_info(dd, "Egress Error: %s\n",
			    egress_err_status_string(buf, sizeof(buf), reg));

	for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_egress_err_status_cnt[i]);
	}
}
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Send Error: %s\n",
		    send_err_status_string(buf, sizeof(buf), reg));

	for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_err_status_cnt[i]);
	}
}
/*
 * The maximum number of times the error clear down will loop before
 * blocking a repeating error.  This value is arbitrary.
 */
#define MAX_CLEAR_COUNT 20

/*
 * Clear and handle an error register.  All error interrupts are funneled
 * through here to have a central location to correctly handle single-
 * or multi-shot errors.
 *
 * For non per-context registers, call this routine with a context value
 * of 0 so the per-context offset is zero.
 *
 * If the handler loops too many times, assume that something is wrong
 * and can't be fixed, so mask the error bits.
 */
static void interrupt_clear_down(struct hfi1_devdata *dd,
				 u32 context,
				 const struct err_reg_info *eri)
{
	u64 reg;
	u32 count;

	/* read in a loop until no more errors are seen */
	count = 0;
	while (1) {
		reg = read_kctxt_csr(dd, context, eri->status);
		if (reg == 0)
			break;
		write_kctxt_csr(dd, context, eri->clear, reg);
		if (likely(eri->handler))
			eri->handler(dd, context, reg);
		count++;
		if (count > MAX_CLEAR_COUNT) {
			u64 mask;

			dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
				   eri->desc, reg);
			/*
			 * Read-modify-write so any other masked bits
			 * stay masked.
			 */
			mask = read_kctxt_csr(dd, context, eri->mask);
			mask &= ~reg;
			write_kctxt_csr(dd, context, eri->mask, mask);
			break;
		}
	}
}
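
/*
 * A sketch of a table entry feeding this routine (field order here is
 * illustrative, not the actual struct err_reg_info layout): the status,
 * clear and mask CSR offsets of one block plus its handler, e.g.
 *
 *	{ CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *	  handle_cce_err, "CceErr" }
 *
 * With such an entry, a continuously re-asserting CceErr bit would be
 * re-read and re-cleared up to MAX_CLEAR_COUNT times, then masked off.
 */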
/*
 * CCE block "misc" interrupt.  Source is < 16.
 */
static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
{
	const struct err_reg_info *eri = &misc_errs[source];

	if (eri->handler) {
		interrupt_clear_down(dd, 0, eri);
	} else {
		dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
			   source);
	}
}
static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   sc_err_status_flags,
			   ARRAY_SIZE(sc_err_status_flags));
}
/*
 * Send context error interrupt.  Source (hw_context) is < 160.
 *
 * All send context errors cause the send context to halt.  The normal
 * clear-down mechanism cannot be used because we cannot clear the
 * error bits until several other long-running items are done first.
 * This is OK because with the context halted, nothing else is going
 * to happen on it anyway.
 */
static void is_sendctxt_err_int(struct hfi1_devdata *dd,
				unsigned int hw_context)
{
	struct send_context_info *sci;
	struct send_context *sc;
	char flags[96];
	u64 status;
	u32 sw_index;
	int i = 0;
	unsigned long irq_flags;

	sw_index = dd->hw_to_sw[hw_context];
	if (sw_index >= dd->num_send_contexts) {
		dd_dev_err(dd,
			   "out of range sw index %u for send context %u\n",
			   sw_index, hw_context);
		return;
	}
	sci = &dd->send_contexts[sw_index];
	spin_lock_irqsave(&dd->sc_lock, irq_flags);
	sc = sci->sc;
	if (!sc) {
		dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
			   sw_index, hw_context);
		spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
		return;
	}

	/* tell the software that a halt has begun */
	sc_stop(sc, SCF_HALTED);

	status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);

	dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
		    send_context_err_status_string(flags, sizeof(flags),
						   status));

	if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
		handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));

	/*
	 * Automatically restart halted kernel contexts out of interrupt
	 * context.  User contexts must ask the driver to restart the context.
	 */
	if (sc->type != SC_USER)
		queue_work(dd->pport->hfi1_wq, &sc->halt_work);
	spin_unlock_irqrestore(&dd->sc_lock, irq_flags);

	/*
	 * Update the counters for the corresponding status bits.
	 * Note that these particular counters are aggregated over all
	 * 160 contexts.
	 */
	for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
		if (status & (1ull << i))
			incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
	}
}
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int source, u64 status)
{
	struct sdma_engine *sde;
	int i = 0;

	sde = &dd->per_sdma[source];
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
		   sde->this_idx, source, (unsigned long long)status);
#endif
	sde->err_cnt++;
	sdma_engine_error(sde, status);

	/*
	 * Update the counters for the corresponding status bits.
	 * Note that these particular counters are aggregated over
	 * all 16 DMA engines.
	 */
	for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
		if (status & (1ull << i))
			incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
	}
}
/*
 * CCE block SDMA error interrupt.  Source is < 16.
 */
static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
{
#ifdef CONFIG_SDMA_VERBOSITY
	struct sdma_engine *sde = &dd->per_sdma[source];

	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
		   source);
	sdma_dumpstate(sde);
#endif
	interrupt_clear_down(dd, source, &sdma_eng_err);
}
/*
 * CCE block "various" interrupt.  Source is < 8.
 */
static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
{
	const struct err_reg_info *eri = &various_err[source];

	/*
	 * TCritInt cannot go through interrupt_clear_down()
	 * because it is not a second tier interrupt. The handler
	 * should be called directly.
	 */
	if (source == TCRIT_INT_SOURCE)
		handle_temp_err(dd);
	else if (eri->handler)
		interrupt_clear_down(dd, 0, eri);
	else
		dd_dev_info(dd,
			    "%s: Unimplemented/reserved interrupt %d\n",
			    __func__, source);
}
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
{
	/* src_ctx is always zero */
	struct hfi1_pportdata *ppd = dd->pport;
	unsigned long flags;
	u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);

	if (reg & QSFP_HFI0_MODPRST_N) {
		if (!qsfp_mod_present(ppd)) {
			dd_dev_info(dd, "%s: QSFP module removed\n",
				    __func__);

			ppd->driver_link_ready = 0;
			/*
			 * Cable removed, reset all our information about the
			 * cache and cable capabilities
			 */

			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			/*
			 * We don't set cache_refresh_required here as we expect
			 * an interrupt when a cable is inserted
			 */
			ppd->qsfp_info.cache_valid = 0;
			ppd->qsfp_info.reset_needed = 0;
			ppd->qsfp_info.limiting_active = 0;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);
			/* Invert the ModPresent pin now to detect plug-in */
			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);

			if ((ppd->offline_disabled_reason >
			  HFI1_ODR_MASK(
			  OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
			  (ppd->offline_disabled_reason ==
			  HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
				ppd->offline_disabled_reason =
				HFI1_ODR_MASK(
				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);

			if (ppd->host_link_state == HLS_DN_POLL) {
				/*
				 * The link is still in POLL. This means
				 * that the normal link down processing
				 * will not happen. We have to do it here
				 * before turning the DC off.
				 */
				queue_work(ppd->link_wq, &ppd->link_down_work);
			}
		} else {
			dd_dev_info(dd, "%s: QSFP module inserted\n",
				    __func__);

			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			ppd->qsfp_info.cache_valid = 0;
			ppd->qsfp_info.cache_refresh_required = 1;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);

			/*
			 * Stop inversion of ModPresent pin to detect
			 * removal of the cable
			 */
			qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);

			ppd->offline_disabled_reason =
				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
		}
	}

	if (reg & QSFP_HFI0_INT_N) {
		dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
			    __func__);
		spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
		ppd->qsfp_info.check_interrupt_flags = 1;
		spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
	}

	/* Schedule the QSFP work only if there is a cable attached. */
	if (qsfp_mod_present(ppd))
		queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
}
static int request_host_lcb_access(struct hfi1_devdata *dd)
{
	int ret;

	ret = do_8051_command(dd, HCMD_MISC,
			      (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "%s: command failed with error %d\n",
			   __func__, ret);
	}
	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
}

static int request_8051_lcb_access(struct hfi1_devdata *dd)
{
	int ret;

	ret = do_8051_command(dd, HCMD_MISC,
			      (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "%s: command failed with error %d\n",
			   __func__, ret);
	}
	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
}
/*
 * Set the LCB selector - allow host access.  The DCC selector always
 * points to the host.
 */
static inline void set_host_lcb_access(struct hfi1_devdata *dd)
{
	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
		  DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
}

/*
 * Clear the LCB selector - allow 8051 access.  The DCC selector always
 * points to the host.
 */
static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
{
	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
}
/*
 * Acquire LCB access from the 8051.  If the host already has access,
 * just increment a counter.  Otherwise, inform the 8051 that the
 * host is taking access.
 *
 * Returns:
 *	0 on success
 *	-EBUSY if the 8051 has control and cannot be disturbed
 *	-errno if unable to acquire access from the 8051
 */
int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
{
	struct hfi1_pportdata *ppd = dd->pport;
	int ret = 0;

	/*
	 * Use the host link state lock so the operation of this routine
	 * { link state check, selector change, count increment } can occur
	 * as a unit against a link state change.  Otherwise there is a
	 * race between the state change and the count increment.
	 */
	if (sleep_ok) {
		mutex_lock(&ppd->hls_lock);
	} else {
		while (!mutex_trylock(&ppd->hls_lock))
			udelay(1);
	}

	/* this access is valid only when the link is up */
	if (ppd->host_link_state & HLS_DOWN) {
		dd_dev_info(dd, "%s: link state %s not up\n",
			    __func__, link_state_name(ppd->host_link_state));
		ret = -EBUSY;
		goto done;
	}

	if (dd->lcb_access_count == 0) {
		ret = request_host_lcb_access(dd);
		if (ret) {
			dd_dev_err(dd,
				   "%s: unable to acquire LCB access, err %d\n",
				   __func__, ret);
			goto done;
		}
		set_host_lcb_access(dd);
	}
	dd->lcb_access_count++;
done:
	mutex_unlock(&ppd->hls_lock);
	return ret;
}
/*
 * Release LCB access by decrementing the use count.  If the count is moving
 * from 1 to 0, inform 8051 that it has control back.
 *
 * Returns:
 *	0 on success
 *	-errno if unable to release access to the 8051
 */
int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
{
	int ret = 0;

	/*
	 * Use the host link state lock because the acquire needed it.
	 * Here, we only need to keep { selector change, count decrement }
	 * as a unit.
	 */
	if (sleep_ok) {
		mutex_lock(&dd->pport->hls_lock);
	} else {
		while (!mutex_trylock(&dd->pport->hls_lock))
			udelay(1);
	}

	if (dd->lcb_access_count == 0) {
		dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
			   __func__);
		goto done;
	}

	if (dd->lcb_access_count == 1) {
		set_8051_lcb_access(dd);
		ret = request_8051_lcb_access(dd);
		if (ret) {
			dd_dev_err(dd,
				   "%s: unable to release LCB access, err %d\n",
				   __func__, ret);
			/* restore host access if the grant didn't work */
			set_host_lcb_access(dd);
			goto done;
		}
	}
	dd->lcb_access_count--;
done:
	mutex_unlock(&dd->pport->hls_lock);
	return ret;
}
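
/*
 * Usage sketch (illustrative): callers bracket direct LCB CSR access
 * with the acquire/release pair, e.g.
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		reg = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
 *		release_lcb_access(dd, 1);
 *	}
 *
 * Because access is counted, such sections may nest; only the outermost
 * acquire and release actually negotiate with the 8051.
 */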
/*
 * Initialize LCB access variables and state.  Called during driver load,
 * after most of the initialization is finished.
 *
 * The DC default is LCB access on for the host.  The driver defaults to
 * leaving access to the 8051.  Assign access now - this constrains the call
 * to this routine to be after all LCB set-up is done.  In particular, after
 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
 */
static void init_lcb_access(struct hfi1_devdata *dd)
{
	dd->lcb_access_count = 0;
}
/*
 * Write a response back to an 8051 request.
 */
static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
{
	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
		  DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
		  (u64)return_code <<
		  DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
		  (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
}
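
/*
 * Encoding sketch: hreq_response(dd, HREQ_SUCCESS, 0x1234) writes one
 * CSR with the COMPLETED bit set, HREQ_SUCCESS in the RETURN_CODE field
 * and 0x1234 in the RSP_DATA field; the 8051 is then expected to notice
 * COMPLETED and consume the reply.
 */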
/*
 * Handle host requests from the 8051.
 */
static void handle_8051_request(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	u16 data = 0;
	u8 type;

	reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
	if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
		return;	/* no request */

	/* zero out COMPLETED so the response is seen */
	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);

	/* extract request details */
	type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
		& DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
	data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
		& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;

	switch (type) {
	case HREQ_LOAD_CONFIG:
	case HREQ_SAVE_CONFIG:
	case HREQ_READ_CONFIG:
	case HREQ_SET_TX_EQ_ABS:
	case HREQ_SET_TX_EQ_REL:
	case HREQ_ENABLE:
		dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
			    type);
		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
		break;
	case HREQ_LCB_RESET:
		/* Put the LCB, RX FPE and TX FPE into reset */
		write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
		/* Make sure the write completed */
		(void)read_csr(dd, DCC_CFG_RESET);
		/* Hold the reset long enough to take effect */
		udelay(1);
		/* Take the LCB, RX FPE and TX FPE out of reset */
		write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
		hreq_response(dd, HREQ_SUCCESS, 0);
		break;
	case HREQ_CONFIG_DONE:
		hreq_response(dd, HREQ_SUCCESS, 0);
		break;
	case HREQ_INTERFACE_TEST:
		hreq_response(dd, HREQ_SUCCESS, data);
		break;
	default:
		dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
		break;
	}
}
/*
 * Set up the allocation unit value.
 */
void set_up_vau(struct hfi1_devdata *dd, u8 vau)
{
	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);

	/* do not modify other values in the register */
	reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
	reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
}
/*
 * Set up initial VL15 credits of the remote.  Assumes the rest of
 * the CM credit registers are zero from a previous global or credit reset.
 * Shared limit for VL15 will always be 0.
 */
void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
{
	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);

	/* set initial values for total and shared credit limit */
	reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
		 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);

	/*
	 * Set total limit to be equal to VL15 credits.
	 * Leave shared limit at 0.
	 */
	reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);

	write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
		  << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
}
/*
 * Zero all credit details from the previous connection and
 * reset the CM manager's internal counters.
 */
void reset_link_credits(struct hfi1_devdata *dd)
{
	int i;

	/* remove all previous VL credit limits */
	for (i = 0; i < TXE_NUM_DATA_VL; i++)
		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
	/* reset the CM block */
	pio_send_control(dd, PSC_CM_RESET);
	/* reset cached value */
	dd->vl15buf_cached = 0;
}
/* convert a vCU to a CU */
static u32 vcu_to_cu(u8 vcu)
{
	return 1 << vcu;
}

/* convert a CU to a vCU */
static u8 cu_to_vcu(u32 cu)
{
	return ilog2(cu);
}

/* convert a vAU to an AU */
static u32 vau_to_au(u8 vau)
{
	return 8 * (1 << vau);
}

static void set_linkup_defaults(struct hfi1_pportdata *ppd)
{
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;
}
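
/*
 * Worked example: vau_to_au(2) == 8 * (1 << 2) == 32 bytes, and since
 * both encodings are exact powers of two, cu_to_vcu(vcu_to_cu(x)) == x
 * for any valid x.
 */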
/*
 * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
 */
static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
{
	u64 reg;

	/* clear lcb run: LCB_CFG_RUN.EN = 0 */
	write_csr(dd, DC_LCB_CFG_RUN, 0);
	/* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
		  1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
	/* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
	dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
	reg = read_csr(dd, DCC_CFG_RESET);
	write_csr(dd, DCC_CFG_RESET, reg |
		  DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
	(void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
	if (!abort) {
		udelay(1);    /* must hold for the longer of 16cclks or 20ns */
		write_csr(dd, DCC_CFG_RESET, reg);
		write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
	}
}
/*
 * This routine should be called after the link has been transitioned to
 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
 * reset).
 *
 * The expectation is that the caller of this routine would have taken
 * care of properly transitioning the link into the correct state.
 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
 *       before calling this function.
 */
static void _dc_shutdown(struct hfi1_devdata *dd)
{
	lockdep_assert_held(&dd->dc8051_lock);

	if (dd->dc_shutdown)
		return;

	dd->dc_shutdown = 1;
	/* Shutdown the LCB */
	lcb_shutdown(dd, 1);
	/*
	 * Going to OFFLINE will have caused the 8051 to put the
	 * SerDes into reset already. Just need to shut down the 8051,
	 * itself.
	 */
	write_csr(dd, DC_DC8051_CFG_RST, 0x1);
}

static void dc_shutdown(struct hfi1_devdata *dd)
{
	mutex_lock(&dd->dc8051_lock);
	_dc_shutdown(dd);
	mutex_unlock(&dd->dc8051_lock);
}
/*
 * Calling this after the DC has been brought out of reset should not
 * do any damage.
 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
 *       before calling this function.
 */
static void _dc_start(struct hfi1_devdata *dd)
{
	lockdep_assert_held(&dd->dc8051_lock);

	if (!dd->dc_shutdown)
		return;

	/* Take the 8051 out of reset */
	write_csr(dd, DC_DC8051_CFG_RST, 0ull);
	/* Wait until 8051 is ready */
	if (wait_fm_ready(dd, TIMEOUT_8051_START))
		dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
			   __func__);

	/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
	write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
	/* lcb_shutdown() with abort=1 does not restore these */
	write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
	dd->dc_shutdown = 0;
}

static void dc_start(struct hfi1_devdata *dd)
{
	mutex_lock(&dd->dc8051_lock);
	_dc_start(dd);
	mutex_unlock(&dd->dc8051_lock);
}
/*
 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
 */
static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
{
	u64 rx_radr, tx_radr;
	u32 version;

	if (dd->icode != ICODE_FPGA_EMULATION)
		return;

	/*
	 * These LCB defaults on emulator _s are good, nothing to do here:
	 *	LCB_CFG_TX_FIFOS_RADR
	 *	LCB_CFG_RX_FIFOS_RADR
	 *	LCB_CFG_LN_DCLK
	 *	LCB_CFG_IGNORE_LOST_RCLK
	 */
	if (is_emulator_s(dd))
		return;
	/* else this is _p */

	version = emulator_rev(dd);
	if (!is_ax(dd))
		version = 0x2d;	/* all B0 use 0x2d or higher settings */

	if (version <= 0x12) {
		/* release 0x12 and below */

		/*
		 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
		 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
		 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
		 */
		rx_radr =
		      0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		/*
		 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
		 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
		 */
		tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version <= 0x18) {
		/* release 0x13 up to 0x18 */
		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
		rx_radr =
		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version == 0x19) {
		/* release 0x19 */
		/* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
		rx_radr =
		      0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version == 0x1a) {
		/* release 0x1a */
		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
		rx_radr =
		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
		write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
	} else {
		/* release 0x1b and higher */
		/* LCB_CFG_RX_FIFOS_RADR = 0x877 */
		rx_radr =
		      0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	}

	write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
	/* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
		  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
}
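
/*
 * Example: an emulator revision of 0x15 falls in the 0x13..0x18 bucket
 * above, so the RX FIFO RADR fields are programmed to 0x988 and the TX
 * RST_VAL to 7; only revision 0x1a additionally writes LCB_CFG_LN_DCLK.
 */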
/*
 * Handle an SMA idle message
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_sma_message(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  sma_message_work);
	struct hfi1_devdata *dd = ppd->dd;
	u64 msg;
	int ret;

	/*
	 * msg is bytes 1-4 of the 40-bit idle message - the command code
	 * is stripped off
	 */
	ret = read_idle_sma(dd, &msg);
	if (ret)
		return;
	dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
	/*
	 * React to the SMA message.  Byte[1] (0 for us) is the command.
	 */
	switch (msg & 0xff) {
	case SMA_IDLE_ARM:
		/*
		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
		 * State Transitions
		 *
		 * Only expected in INIT or ARMED, discard otherwise.
		 */
		if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
			ppd->neighbor_normal = 1;
		break;
	case SMA_IDLE_ACTIVE:
		/*
		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
		 * State Transitions
		 *
		 * Can activate the node.  Discard otherwise.
		 */
		if (ppd->host_link_state == HLS_UP_ARMED &&
		    ppd->is_active_optimize_enabled) {
			ppd->neighbor_normal = 1;
			ret = set_link_state(ppd, HLS_UP_ACTIVE);
			if (ret)
				dd_dev_err(
					dd,
					"%s: received Active SMA idle message, couldn't set link to Active\n",
					__func__);
		}
		break;
	default:
		dd_dev_err(dd,
			   "%s: received unexpected SMA idle message 0x%llx\n",
			   __func__, msg);
		break;
	}
}
static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
{
	u64 rcvctrl;
	unsigned long flags;

	spin_lock_irqsave(&dd->rcvctrl_lock, flags);
	rcvctrl = read_csr(dd, RCV_CTRL);
	rcvctrl |= add;
	rcvctrl &= ~clear;
	write_csr(dd, RCV_CTRL, rcvctrl);
	spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
}

static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
{
	adjust_rcvctrl(dd, add, 0);
}

static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
{
	adjust_rcvctrl(dd, 0, clear);
}
/*
 * Called from all interrupt handlers to start handling an SPC freeze.
 */
void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct send_context *sc;
	int i;
	int sc_flags;

	if (flags & FREEZE_SELF)
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);

	/* enter frozen mode */
	dd->flags |= HFI1_FROZEN;

	/* notify all SDMA engines that they are going into a freeze */
	sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));

	sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
					      SCF_LINK_DOWN : 0);
	/* do halt pre-handling on all enabled send contexts */
	for (i = 0; i < dd->num_send_contexts; i++) {
		sc = dd->send_contexts[i].sc;
		if (sc && (sc->flags & SCF_ENABLED))
			sc_stop(sc, sc_flags);
	}

	/* Send contexts are frozen. Notify user space */
	hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);

	if (flags & FREEZE_ABORT) {
		dd_dev_err(dd,
			   "Aborted freeze recovery. Please REBOOT system\n");
		return;
	}
	/* queue non-interrupt handler */
	queue_work(ppd->hfi1_wq, &ppd->freeze_work);
}
/*
 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
 * depending on the "freeze" parameter.
 *
 * No need to return an error if it times out, our only option
 * is to proceed anyway.
 */
static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
{
	unsigned long timeout;
	u64 reg;

	timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
	while (1) {
		reg = read_csr(dd, CCE_STATUS);
		if (freeze) {
			/* waiting until all indicators are set */
			if ((reg & ALL_FROZE) == ALL_FROZE)
				return;	/* all done */
		} else {
			/* waiting until all indicators are clear */
			if ((reg & ALL_FROZE) == 0)
				return;	/* all done */
		}

		if (time_after(jiffies, timeout)) {
			dd_dev_err(dd,
				   "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
				   freeze ? "" : "un", reg & ALL_FROZE,
				   freeze ? ALL_FROZE : 0ull);
			return;
		}
		usleep_range(80, 120);
	}
}
/*
 * Do all freeze handling for the RXE block.
 */
static void rxe_freeze(struct hfi1_devdata *dd)
{
	int i;
	struct hfi1_ctxtdata *rcd;

	/* disable port */
	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

	/* disable all receive contexts */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
		hfi1_rcd_put(rcd);
	}
}
/*
 * Unfreeze handling for the RXE block - kernel contexts only.
 * This will also enable the port.  User contexts will do unfreeze
 * handling on a per-context basis as they call into the driver.
 *
 */
static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
{
	u32 rcvmask;
	u16 i;
	struct hfi1_ctxtdata *rcd;

	/* enable all kernel contexts */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);

		/* Ensure all non-user contexts(including vnic) are enabled */
		if (!rcd ||
		    (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
			hfi1_rcd_put(rcd);
			continue;
		}
		rcvmask = HFI1_RCVCTRL_CTXT_ENB;
		/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
		rcvmask |= hfi1_rcvhdrtail_kvaddr(rcd) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		hfi1_rcvctrl(dd, rcvmask, rcd);
		hfi1_rcd_put(rcd);
	}

	/* enable port */
	add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
}
/*
 * Non-interrupt SPC freeze handling.
 *
 * This is a work-queue function outside of the triggering interrupt.
 */
void handle_freeze(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  freeze_work);
	struct hfi1_devdata *dd = ppd->dd;

	/* wait for freeze indicators on all affected blocks */
	wait_for_freeze_status(dd, 1);

	/* SPC is now frozen */

	/* do send PIO freeze steps */
	pio_freeze(dd);

	/* do send DMA freeze steps */
	sdma_freeze(dd);

	/* do send egress freeze steps - nothing to do */

	/* do receive freeze steps */
	rxe_freeze(dd);

	/*
	 * Unfreeze the hardware - clear the freeze, wait for each
	 * block's frozen bit to clear, then clear the frozen flag.
	 */
	write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
	wait_for_freeze_status(dd, 0);

	if (is_ax(dd)) {
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
		wait_for_freeze_status(dd, 1);
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
		wait_for_freeze_status(dd, 0);
	}

	/* do send PIO unfreeze steps for kernel contexts */
	pio_kernel_unfreeze(dd);

	/* do send DMA unfreeze steps */
	sdma_unfreeze(dd);

	/* do send egress unfreeze steps - nothing to do */

	/* do receive unfreeze steps for kernel contexts */
	rxe_kernel_unfreeze(dd);

	/*
	 * The unfreeze procedure touches global device registers when
	 * it disables and re-enables RXE. Mark the device unfrozen
	 * after all that is done so other parts of the driver waiting
	 * for the device to unfreeze don't do things out of order.
	 *
	 * The above implies that the meaning of HFI1_FROZEN flag is
	 * "Device has gone into freeze mode and freeze mode handling
	 * is still in progress."
	 *
	 * The flag will be removed when freeze mode processing has
	 * completed.
	 */
	dd->flags &= ~HFI1_FROZEN;
	wake_up(&dd->event_queue);

	/* no longer frozen */
}
/**
 * update_xmit_counters - update PortXmitWait/PortVlXmitWait
 * counters.
 * @ppd: info of physical Hfi port
 * @link_width: new link width after link up or downgrade
 *
 * Update the PortXmitWait and PortVlXmitWait counters after
 * a link up or downgrade event to reflect a link width change.
 */
static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width)
{
	int i;
	u16 tx_width;
	u16 link_speed;

	tx_width = tx_link_width(link_width);
	link_speed = get_link_speed(ppd->link_speed_active);

	/*
	 * There are C_VL_COUNT number of PortVLXmitWait counters.
	 * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
	 */
	for (i = 0; i < C_VL_COUNT + 1; i++)
		get_xmit_wait_counters(ppd, tx_width, link_speed, i);
}
/*
 * Handle a link up interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_link_up(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_up_work);
	struct hfi1_devdata *dd = ppd->dd;

	set_link_state(ppd, HLS_UP_INIT);

	/* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
	read_ltp_rtt(dd);
	/*
	 * OPA specifies that certain counters are cleared on a transition
	 * to link up, so do that.
	 */
	clear_linkup_counters(dd);
	/*
	 * And (re)set link up default values.
	 */
	set_linkup_defaults(ppd);

	/*
	 * Set VL15 credits. Use cached value from verify cap interrupt.
	 * In case of quick linkup or simulator, vl15 value will be set by
	 * handle_linkup_change. VerifyCap interrupt handler will not be
	 * called in those scenarios.
	 */
	if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
		set_up_vl15(dd, dd->vl15buf_cached);

	/* enforce link speed enabled */
	if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
		/* oops - current speed is not enabled, bounce */
		dd_dev_err(dd,
			   "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
			   ppd->link_speed_active, ppd->link_speed_enabled);
		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
				     OPA_LINKDOWN_REASON_SPEED_POLICY);
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	}
}
/*
 * Several pieces of LNI information were cached for SMA in ppd.
 * Reset these on link down
 */
static void reset_neighbor_info(struct hfi1_pportdata *ppd)
{
	ppd->neighbor_guid = 0;
	ppd->neighbor_port_number = 0;
	ppd->neighbor_type = 0;
	ppd->neighbor_fm_security = 0;
}
static const char * const link_down_reason_strs[] = {
	[OPA_LINKDOWN_REASON_NONE] = "None",
	[OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
	[OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
	[OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
	[OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
	[OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
	[OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
	[OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
	[OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
	[OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
	[OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
	[OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
	[OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
	[OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
	[OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
	[OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
	[OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
	[OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
	[OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
	[OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
	[OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
	[OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
	[OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
	[OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
	[OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
	[OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
	[OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
	[OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
	[OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
	[OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
	[OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
	[OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
	[OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
		"Excessive buffer overrun",
	[OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
	[OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
	[OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
	[OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
	[OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
	[OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
	[OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
	[OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
		"Local media not installed",
	[OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
	[OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
	[OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
		"End to end not installed",
	[OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
	[OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
	[OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
	[OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
	[OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
	[OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
};
/* return the neighbor link down reason string */
static const char *link_down_reason_str(u8 reason)
{
	const char *str = NULL;

	if (reason < ARRAY_SIZE(link_down_reason_strs))
		str = link_down_reason_strs[reason];
	if (!str)
		str = "(invalid)";

	return str;
}
/*
 * Handle a link down interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_link_down(struct work_struct *work)
{
	u8 lcl_reason, neigh_reason = 0;
	u8 link_down_reason;
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_down_work);
	int was_up;
	static const char ldr_str[] = "Link down reason: ";

	if ((ppd->host_link_state &
	     (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
	     ppd->port_type == PORT_TYPE_FIXED)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);

	/* Go offline first, then deal with reading/writing through 8051 */
	was_up = !!(ppd->host_link_state & HLS_UP);
	set_link_state(ppd, HLS_DN_OFFLINE);
	xchg(&ppd->is_link_down_queued, 0);

	if (was_up) {
		lcl_reason = 0;
		/* link down reason is only valid if the link was up */
		read_link_down_reason(ppd->dd, &link_down_reason);
		switch (link_down_reason) {
		case LDR_LINK_TRANSFER_ACTIVE_LOW:
			/* the link went down, no idle message reason */
			dd_dev_info(ppd->dd, "%sUnexpected link down\n",
				    ldr_str);
			break;
		case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
			/*
			 * The neighbor reason is only valid if an idle message
			 * was received for it.
			 */
			read_planned_down_reason_code(ppd->dd, &neigh_reason);
			dd_dev_info(ppd->dd,
				    "%sNeighbor link down message %d, %s\n",
				    ldr_str, neigh_reason,
				    link_down_reason_str(neigh_reason));
			break;
		case LDR_RECEIVED_HOST_OFFLINE_REQ:
			dd_dev_info(ppd->dd,
				    "%sHost requested link to go offline\n",
				    ldr_str);
			break;
		default:
			dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
				    ldr_str, link_down_reason);
			break;
		}

		/*
		 * If no reason, assume peer-initiated but missed
		 * LinkGoingDown idle flits.
		 */
		if (neigh_reason == 0)
			lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
	} else {
		/* went down while polling or going up */
		lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
	}

	set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);

	/* inform the SMA when the link transitions from up to down */
	if (was_up && ppd->local_link_down_reason.sma == 0 &&
	    ppd->neigh_link_down_reason.sma == 0) {
		ppd->local_link_down_reason.sma =
			ppd->local_link_down_reason.latest;
		ppd->neigh_link_down_reason.sma =
			ppd->neigh_link_down_reason.latest;
	}

	reset_neighbor_info(ppd);

	/* disable the port */
	clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

	/*
	 * If there is no cable attached, turn the DC off. Otherwise,
	 * start the link bring up.
	 */
	if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
		dc_shutdown(ppd->dd);
	else
		start_link(ppd);
}
void handle_link_bounce(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_bounce_work);

	/*
	 * Only do something if the link is currently up.
	 */
	if (ppd->host_link_state & HLS_UP) {
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	} else {
		dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
			    __func__, link_state_name(ppd->host_link_state));
	}
}
/*
 * Mask conversion: Capability exchange to Port LTP.  The capability
 * exchange has an implicit 16b CRC that is mandatory.
 */
static int cap_to_port_ltp(int cap)
{
	int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */

	if (cap & CAP_CRC_14B)
		port_ltp |= PORT_LTP_CRC_MODE_14;
	if (cap & CAP_CRC_48B)
		port_ltp |= PORT_LTP_CRC_MODE_48;
	if (cap & CAP_CRC_12B_16B_PER_LANE)
		port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;

	return port_ltp;
}
/*
 * Convert an OPA Port LTP mask to capability mask
 */
int port_ltp_to_cap(int port_ltp)
{
	int cap_mask = 0;

	if (port_ltp & PORT_LTP_CRC_MODE_14)
		cap_mask |= CAP_CRC_14B;
	if (port_ltp & PORT_LTP_CRC_MODE_48)
		cap_mask |= CAP_CRC_48B;
	if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
		cap_mask |= CAP_CRC_12B_16B_PER_LANE;

	return cap_mask;
}
/*
 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
 */
static int lcb_to_port_ltp(int lcb_crc)
{
	int port_ltp = 0;

	if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
		port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
	else if (lcb_crc == LCB_CRC_48B)
		port_ltp = PORT_LTP_CRC_MODE_48;
	else if (lcb_crc == LCB_CRC_14B)
		port_ltp = PORT_LTP_CRC_MODE_14;
	else
		port_ltp = PORT_LTP_CRC_MODE_16;

	return port_ltp;
}
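
/*
 * Round-trip example: cap_to_port_ltp(CAP_CRC_14B) returns
 * PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 - the mandatory implicit
 * 16b mode plus the advertised 14b mode - and port_ltp_to_cap() applied
 * to that mask recovers exactly CAP_CRC_14B.
 */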
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
{
	if (ppd->pkeys[2] != 0) {
		ppd->pkeys[2] = 0;
		(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
		hfi1_event_pkey_change(ppd->dd, ppd->port);
	}
}
/*
 * Convert the given link width to the OPA link width bitmask.
 */
static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
{
	switch (width) {
	case 0:
		/*
		 * Simulator and quick linkup do not set the width.
		 * Just set it to 4x without complaint.
		 */
		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
			return OPA_LINK_WIDTH_4X;
		return 0; /* no lanes up */
	case 1: return OPA_LINK_WIDTH_1X;
	case 2: return OPA_LINK_WIDTH_2X;
	case 3: return OPA_LINK_WIDTH_3X;
	case 4: return OPA_LINK_WIDTH_4X;
	default:
		dd_dev_info(dd, "%s: invalid width %d, using 4\n",
			    __func__, width);
		return OPA_LINK_WIDTH_4X;
	}
}
/*
 * Do a population count on the bottom nibble.
 */
static const u8 bit_counts[16] = {
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
};

static inline u8 nibble_to_count(u8 nibble)
{
	return bit_counts[nibble & 0xf];
}
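
/*
 * Example: an enable_lane mask of 0xb (binary 1011) has three bits set
 * in the bottom nibble, so nibble_to_count(0xb) == bit_counts[0xb] == 3
 * active lanes.
 */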
/*
 * Read the active lane information from the 8051 registers and return
 * their widths.
 *
 * Active lane information is found in these 8051 registers:
 *	enable_lane_tx
 *	enable_lane_rx
 */
static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
			    u16 *rx_width)
{
	u16 tx, rx;
	u8 enable_lane_rx;
	u8 enable_lane_tx;
	u8 tx_polarity_inversion;
	u8 rx_polarity_inversion;
	u8 max_rate;

	/* read the active lanes */
	read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
			 &rx_polarity_inversion, &max_rate);
	read_local_lni(dd, &enable_lane_rx);

	/* convert to counts */
	tx = nibble_to_count(enable_lane_tx);
	rx = nibble_to_count(enable_lane_rx);

	/*
	 * Set link_speed_active here, overriding what was set in
	 * handle_verify_cap().  The ASIC 8051 firmware does not correctly
	 * set the max_rate field in handle_verify_cap until v0.19.
	 */
	if ((dd->icode == ICODE_RTL_SILICON) &&
	    (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
		/* max_rate: 0 = 12.5G, 1 = 25G */
		switch (max_rate) {
		case 0:
			dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
			break;
		case 1:
			dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
			break;
		default:
			dd_dev_err(dd,
				   "%s: unexpected max rate %d, using 25Gb\n",
				   __func__, (int)max_rate);
			dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
			break;
		}
	}

	dd_dev_info(dd,
		    "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
		    enable_lane_tx, tx, enable_lane_rx, rx);
	*tx_width = link_width_to_bits(dd, tx);
	*rx_width = link_width_to_bits(dd, rx);
}
7358 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7359 * Valid after the end of VerifyCap and during LinkUp. Does not change
7360 * after link up. I.e. look elsewhere for downgrade information.
7363 * + bits [7:4] contain the number of active transmitters
7364 * + bits [3:0] contain the number of active receivers
7365 * These are numbers 1 through 4 and can be different values if the
7366 * link is asymmetric.
7368 * verify_cap_local_fm_link_width[0] retains its original value.
7370 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7374 u8 misc_bits, local_flags;
7375 u16 active_tx, active_rx;
7377 read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
7378 tx = widths >> 12;
7379 rx = (widths >> 8) & 0xf;
7381 *tx_width = link_width_to_bits(dd, tx);
7382 *rx_width = link_width_to_bits(dd, rx);
7384 /* print the active widths */
7385 get_link_widths(dd, &active_tx, &active_rx);
7389 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7390 * hardware information when the link first comes up.
7392 * The link width is not available until after VerifyCap.AllFramesReceived
7393 * (the trigger for handle_verify_cap), so this is outside that routine
7394 * and should be called when the 8051 signals linkup.
7396 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7398 u16 tx_width, rx_width;
7400 /* get end-of-LNI link widths */
7401 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7403 /* use tx_width as the link is supposed to be symmetric on link up */
7404 ppd->link_width_active = tx_width;
7405 /* link width downgrade active (LWD.A) starts out matching LW.A */
7406 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7407 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7408 /* per OPA spec, on link up LWD.E resets to LWD.S */
7409 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7410 /* cache the active egress rate (units [10^6 bits/sec]) */
7411 ppd->current_egress_rate = active_egress_rate(ppd);
7415 * Handle a verify capabilities interrupt from the 8051.
7417 * This is a work-queue function outside of the interrupt.
7419 void handle_verify_cap(struct work_struct *work)
7421 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7423 struct hfi1_devdata *dd = ppd->dd;
7425 u8 power_management;
7435 u16 active_tx, active_rx;
7436 u8 partner_supported_crc;
7440 set_link_state(ppd, HLS_VERIFY_CAP);
7442 lcb_shutdown(dd, 0);
7443 adjust_lcb_for_fpga_serdes(dd);
7445 read_vc_remote_phy(dd, &power_management, &continuous);
7446 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7447 &partner_supported_crc);
7448 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7449 read_remote_device_id(dd, &device_id, &device_rev);
7451 /* print the active widths */
7452 get_link_widths(dd, &active_tx, &active_rx);
7454 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7455 (int)power_management, (int)continuous);
7457 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7458 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7459 (int)partner_supported_crc);
7460 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7461 (u32)remote_tx_rate, (u32)link_widths);
7462 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7463 (u32)device_id, (u32)device_rev);
7465 * The peer vAU value just read is the peer receiver value. HFI does
7466 * not support a transmit vAU of 0 (AU == 8). We advertised that
7467 * with Z=1 in the fabric capabilities sent to the peer. The peer
7468 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7469 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7470 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7471 * subject to the Z value exception.
7473 if (vau == 0)
7474 vau = 1;
7475 set_up_vau(dd, vau);
7478 * Set VL15 credits to 0 in global credit register. Cache remote VL15
7479 * credits value and wait for link-up interrupt to set it.
7482 dd->vl15buf_cached = vl15buf;
7484 /* set up the LCB CRC mode */
7485 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7487 /* order is important: use the lowest bit in common */
7488 if (crc_mask & CAP_CRC_14B)
7489 crc_val = LCB_CRC_14B;
7490 else if (crc_mask & CAP_CRC_48B)
7491 crc_val = LCB_CRC_48B;
7492 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7493 crc_val = LCB_CRC_12B_16B_PER_LANE;
7495 crc_val = LCB_CRC_16B;
7497 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7498 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7499 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7501 /* set (14b only) or clear sideband credit */
7502 reg = read_csr(dd, SEND_CM_CTRL);
7503 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7504 write_csr(dd, SEND_CM_CTRL,
7505 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7507 write_csr(dd, SEND_CM_CTRL,
7508 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7511 ppd->link_speed_active = 0; /* invalid value */
7512 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7513 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7514 switch (remote_tx_rate) {
7516 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7519 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7523 /* actual rate is highest bit of the ANDed rates */
7524 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7527 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7529 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7531 if (ppd->link_speed_active == 0) {
7532 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7533 __func__, (int)remote_tx_rate);
7534 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7538 * Cache the values of the supported, enabled, and active
7539 * LTP CRC modes to return in 'portinfo' queries. But the bit
7540 * flags that are returned in the portinfo query differ from
7541 * what's in the link_crc_mask, crc_sizes, and crc_val
7542 * variables. Convert these here.
7544 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7545 /* supported crc modes */
7546 ppd->port_ltp_crc_mode |=
7547 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7548 /* enabled crc modes */
7549 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7550 /* active crc mode */
7552 /* set up the remote credit return table */
7553 assign_remote_cm_au_table(dd, vcu);
7556 * The LCB is reset on entry to handle_verify_cap(), so this must
7557 * be applied on every link up.
7559 * Adjust LCB error kill enable to kill the link if
7560 * these RBUF errors are seen:
7561 * REPLAY_BUF_MBE_SMASK
7562 * FLIT_INPUT_BUF_MBE_SMASK
7564 if (is_ax(dd)) { /* fixed in B0 */
7565 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7566 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7567 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7568 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7571 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7572 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7574 /* give 8051 access to the LCB CSRs */
7575 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7576 set_8051_lcb_access(dd);
7578 /* tell the 8051 to go to LinkUp */
7579 set_link_state(ppd, HLS_GOING_UP);
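/*
 * Illustrative sketch, not part of the driver: the CRC negotiation in
 * handle_verify_cap() above ANDs our enabled capability mask with the
 * peer's advertised mask and takes the lowest set bit in common, with 16b
 * CRC as the fallback when nothing overlaps. Restated standalone (SK2_*
 * values are assumed stand-ins for the CAP_*/LCB_* constants):
 */
#if 0	/* sketch only; not built with the driver */
enum { SK2_CAP_14B = 0x1, SK2_CAP_48B = 0x2, SK2_CAP_PER_LANE = 0x4 };
enum { SK2_LCB_16B = 0, SK2_LCB_14B = 1, SK2_LCB_48B = 2,
       SK2_LCB_PER_LANE = 3 };

static int sketch_pick_crc_mode(int local_enabled, int peer_supported)
{
	int common = local_enabled & peer_supported;

	if (common & SK2_CAP_14B)	/* lowest bit in common wins */
		return SK2_LCB_14B;
	if (common & SK2_CAP_48B)
		return SK2_LCB_48B;
	if (common & SK2_CAP_PER_LANE)
		return SK2_LCB_PER_LANE;
	return SK2_LCB_16B;		/* always-available fallback */
}
#endif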
7583 * apply_link_downgrade_policy - Apply the link width downgrade enabled
7584 * policy against the current active link widths.
7585 * @ppd: info of physical HFI port
7586 * @refresh_widths: True indicates link downgrade event
7587 * @return: True indicates a successful link downgrade. False indicates
7588 * link downgrade event failed and the link will bounce back to
7589 * default link width.
7591 * Called when the enabled policy changes or the active link widths change.
7593 * Refresh_widths indicates that a link downgrade occurred. The
7594 * link_downgraded variable is set by refresh_widths and
7595 * determines the success/failure of the policy application.
7597 bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
7598 bool refresh_widths)
7604 bool link_downgraded = refresh_widths;
7606 /* use the hls lock to avoid a race with actual link up */
7609 mutex_lock(&ppd->hls_lock);
7610 /* only apply if the link is up */
7611 if (ppd->host_link_state & HLS_DOWN) {
7612 /* still going up... wait and retry */
7613 if (ppd->host_link_state & HLS_GOING_UP) {
7614 if (++tries < 1000) {
7615 mutex_unlock(&ppd->hls_lock);
7616 usleep_range(100, 120); /* arbitrary */
7620 "%s: giving up waiting for link state change\n",
7626 lwde = ppd->link_width_downgrade_enabled;
7628 if (refresh_widths) {
7629 get_link_widths(ppd->dd, &tx, &rx);
7630 ppd->link_width_downgrade_tx_active = tx;
7631 ppd->link_width_downgrade_rx_active = rx;
7634 if (ppd->link_width_downgrade_tx_active == 0 ||
7635 ppd->link_width_downgrade_rx_active == 0) {
7636 /* the 8051 reported a dead link as a downgrade */
7637 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7638 link_downgraded = false;
7639 } else if (lwde == 0) {
7640 /* downgrade is disabled */
7642 /* bounce if not at starting active width */
7643 if ((ppd->link_width_active !=
7644 ppd->link_width_downgrade_tx_active) ||
7645 (ppd->link_width_active !=
7646 ppd->link_width_downgrade_rx_active)) {
7648 "Link downgrade is disabled and link has downgraded, downing link\n");
7650 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7651 ppd->link_width_active,
7652 ppd->link_width_downgrade_tx_active,
7653 ppd->link_width_downgrade_rx_active);
7655 link_downgraded = false;
7657 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7658 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7659 /* Tx or Rx is outside the enabled policy */
7661 "Link is outside of downgrade allowed, downing link\n");
7663 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7664 lwde, ppd->link_width_downgrade_tx_active,
7665 ppd->link_width_downgrade_rx_active);
7667 link_downgraded = false;
7671 mutex_unlock(&ppd->hls_lock);
7674 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7675 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7676 set_link_state(ppd, HLS_DN_OFFLINE);
7680 return link_downgraded;
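/*
 * Illustrative sketch, not part of the driver: stripped of locking and the
 * link-state retry loop, the policy applied above reduces to a pure
 * predicate on the enabled-downgrade mask and the two active widths. The
 * sketch_* names are hypothetical.
 */
#if 0	/* sketch only; not built with the driver */
enum sketch_lwd_action { SK_LWD_OK, SK_LWD_IGNORE, SK_LWD_BOUNCE };

static enum sketch_lwd_action sketch_lwd_check(u16 lwde, u16 active,
					       u16 tx, u16 rx)
{
	if (tx == 0 || rx == 0)
		return SK_LWD_IGNORE;	/* 8051 reported a dead link */
	if (lwde == 0)			/* downgrade disabled: must still be
					 * at the original active width */
		return (tx == active && rx == active) ? SK_LWD_OK
						      : SK_LWD_BOUNCE;
	if (!(lwde & tx) || !(lwde & rx))
		return SK_LWD_BOUNCE;	/* outside the enabled policy */
	return SK_LWD_OK;
}
#endif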
7684 * Handle a link downgrade interrupt from the 8051.
7686 * This is a work-queue function outside of the interrupt.
7688 void handle_link_downgrade(struct work_struct *work)
7690 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7691 link_downgrade_work);
7693 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7694 if (apply_link_downgrade_policy(ppd, true))
7695 update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active);
7698 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7700 return flag_string(buf, buf_len, flags, dcc_err_flags,
7701 ARRAY_SIZE(dcc_err_flags));
7704 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7706 return flag_string(buf, buf_len, flags, lcb_err_flags,
7707 ARRAY_SIZE(lcb_err_flags));
7710 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7712 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7713 ARRAY_SIZE(dc8051_err_flags));
7716 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7718 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7719 ARRAY_SIZE(dc8051_info_err_flags));
7722 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7724 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7725 ARRAY_SIZE(dc8051_info_host_msg_flags));
7728 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7730 struct hfi1_pportdata *ppd = dd->pport;
7731 u64 info, err, host_msg;
7732 int queue_link_down = 0;
7735 /* look at the flags */
7736 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7737 /* 8051 information set by firmware */
7738 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7739 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7740 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7741 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7743 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7744 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7747 * Handle error flags.
7749 if (err & FAILED_LNI) {
7751 * LNI error indications are cleared by the 8051
7752 * only when starting polling. Only pay attention
7753 * to them when in the states that occur during
7756 if (ppd->host_link_state
7757 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7758 queue_link_down = 1;
7759 dd_dev_info(dd, "Link error: %s\n",
7760 dc8051_info_err_string(buf,
7765 err &= ~(u64)FAILED_LNI;
7767 /* unknown frames can happen during LNI, just count */
7768 if (err & UNKNOWN_FRAME) {
7769 ppd->unknown_frame_count++;
7770 err &= ~(u64)UNKNOWN_FRAME;
7773 /* report remaining errors, but do not do anything */
7774 dd_dev_err(dd, "8051 info error: %s\n",
7775 dc8051_info_err_string(buf, sizeof(buf),
7780 * Handle host message flags.
7782 if (host_msg & HOST_REQ_DONE) {
7784 * Presently, the driver does a busy wait for
7785 * host requests to complete. This is only an
7786 * informational message.
7787 * NOTE: The 8051 clears the host message
7788 * information *on the next 8051 command*.
7789 * Therefore, when linkup is achieved,
7790 * this flag will still be set.
7792 host_msg &= ~(u64)HOST_REQ_DONE;
7794 if (host_msg & BC_SMA_MSG) {
7795 queue_work(ppd->link_wq, &ppd->sma_message_work);
7796 host_msg &= ~(u64)BC_SMA_MSG;
7798 if (host_msg & LINKUP_ACHIEVED) {
7799 dd_dev_info(dd, "8051: Link up\n");
7800 queue_work(ppd->link_wq, &ppd->link_up_work);
7801 host_msg &= ~(u64)LINKUP_ACHIEVED;
7803 if (host_msg & EXT_DEVICE_CFG_REQ) {
7804 handle_8051_request(ppd);
7805 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7807 if (host_msg & VERIFY_CAP_FRAME) {
7808 queue_work(ppd->link_wq, &ppd->link_vc_work);
7809 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7811 if (host_msg & LINK_GOING_DOWN) {
7812 const char *extra = "";
7813 /* no downgrade action needed if going down */
7814 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7815 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7816 extra = " (ignoring downgrade)";
7818 dd_dev_info(dd, "8051: Link down%s\n", extra);
7819 queue_link_down = 1;
7820 host_msg &= ~(u64)LINK_GOING_DOWN;
7822 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7823 queue_work(ppd->link_wq, &ppd->link_downgrade_work);
7824 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7827 /* report remaining messages, but do not do anything */
7828 dd_dev_info(dd, "8051 info host message: %s\n",
7829 dc8051_info_host_msg_string(buf,
7834 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7836 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7838 * Lost the 8051 heartbeat. If this happens, we
7839 * receive constant interrupts about it. Disable
7840 * the interrupt after the first.
7842 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7843 write_csr(dd, DC_DC8051_ERR_EN,
7844 read_csr(dd, DC_DC8051_ERR_EN) &
7845 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7847 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7850 /* report the error, but do not do anything */
7851 dd_dev_err(dd, "8051 error: %s\n",
7852 dc8051_err_string(buf, sizeof(buf), reg));
7855 if (queue_link_down) {
7857 * if the link is already going down or disabled, do not
7858 * queue another. If there's a link down entry already
7859 * queued, don't queue another one.
7861 if ((ppd->host_link_state &
7862 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7863 ppd->link_enabled == 0) {
7864 dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
7865 __func__, ppd->host_link_state,
7868 if (xchg(&ppd->is_link_down_queued, 1) == 1)
7870 "%s: link down request already queued\n",
7873 queue_work(ppd->link_wq, &ppd->link_down_work);
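/*
 * Illustrative sketch, not part of the driver: handle_8051_interrupt()
 * follows the flag-peeling pattern used by most handlers in this file --
 * snapshot the flag word, clear each recognized bit from the local copy as
 * it is handled, then report whatever remains. The SK_FLAG_* bits are
 * assumed.
 */
#if 0	/* sketch only; not built with the driver */
#define SK_FLAG_A 0x1ull
#define SK_FLAG_B 0x2ull

static void sketch_peel_flags(u64 reg)
{
	if (reg & SK_FLAG_A) {
		/* ...handle A... */
		reg &= ~SK_FLAG_A;
	}
	if (reg & SK_FLAG_B) {
		/* ...handle B... */
		reg &= ~SK_FLAG_B;
	}
	if (reg)	/* leftovers are reported, never acted on */
		pr_err("unexpected flags 0x%llx\n", (unsigned long long)reg);
}
#endif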
7878 static const char * const fm_config_txt[] = {
7880 "BadHeadDist: Distance violation between two head flits",
7882 "BadTailDist: Distance violation between two tail flits",
7884 "BadCtrlDist: Distance violation between two credit control flits",
7886 "BadCrdAck: Credits return for unsupported VL",
7888 "UnsupportedVLMarker: Received VL Marker",
7890 "BadPreempt: Exceeded the preemption nesting level",
7892 "BadControlFlit: Received unsupported control flit",
7895 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7898 static const char * const port_rcv_txt[] = {
7900 "BadPktLen: Illegal PktLen",
7902 "PktLenTooLong: Packet longer than PktLen",
7904 "PktLenTooShort: Packet shorter than PktLen",
7906 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7908 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7910 "BadL2: Illegal L2 opcode",
7912 "BadSC: Unsupported SC",
7914 "BadRC: Illegal RC",
7916 "PreemptError: Preempting with same VL",
7918 "PreemptVL15: Preempting a VL15 packet",
7921 #define OPA_LDR_FMCONFIG_OFFSET 16
7922 #define OPA_LDR_PORTRCV_OFFSET 0
7923 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7925 u64 info, hdr0, hdr1;
7928 struct hfi1_pportdata *ppd = dd->pport;
7932 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7933 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7934 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7935 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7936 /* set status bit */
7937 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7939 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7942 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7943 struct hfi1_pportdata *ppd = dd->pport;
7944 /* this counter saturates at (2^32) - 1 */
7945 if (ppd->link_downed < (u32)UINT_MAX)
7947 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7950 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7951 u8 reason_valid = 1;
7953 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7954 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7955 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7956 /* set status bit */
7957 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7967 extra = fm_config_txt[info];
7970 extra = fm_config_txt[info];
7971 if (ppd->port_error_action &
7972 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7975 * lcl_reason cannot be derived from info for this error
7978 lcl_reason =
7979 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7984 snprintf(buf, sizeof(buf), "reserved%lld", info);
7989 if (reason_valid && !do_bounce) {
7990 do_bounce = ppd->port_error_action &
7991 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7992 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7995 /* just report this */
7996 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
7998 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
8001 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
8002 u8 reason_valid = 1;
8004 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
8005 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
8006 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
8007 if (!(dd->err_info_rcvport.status_and_code &
8008 OPA_EI_STATUS_SMASK)) {
8009 dd->err_info_rcvport.status_and_code =
8010 info & OPA_EI_CODE_SMASK;
8011 /* set status bit */
8012 dd->err_info_rcvport.status_and_code |=
8013 OPA_EI_STATUS_SMASK;
8015 * save first 2 flits in the packet that caused the error
8018 dd->err_info_rcvport.packet_flit1 = hdr0;
8019 dd->err_info_rcvport.packet_flit2 = hdr1;
8032 extra = port_rcv_txt[info];
8036 snprintf(buf, sizeof(buf), "reserved%lld", info);
8041 if (reason_valid && !do_bounce) {
8042 do_bounce = ppd->port_error_action &
8043 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
8044 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
8047 /* just report this */
8048 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
8049 " hdr0 0x%llx, hdr1 0x%llx\n",
8052 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8055 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8056 /* informative only */
8057 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
8058 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8060 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8061 /* informative only */
8062 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
8063 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8066 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8067 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8069 /* report any remaining errors */
8071 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8072 dcc_err_string(buf, sizeof(buf), reg));
8074 if (lcl_reason == 0)
8075 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8078 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8080 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
8081 queue_work(ppd->link_wq, &ppd->link_bounce_work);
8085 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8089 dd_dev_info(dd, "LCB Error: %s\n",
8090 lcb_err_string(buf, sizeof(buf), reg));
8094 * CCE block DC interrupt. Source is < 8.
8096 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8098 const struct err_reg_info *eri = &dc_errs[source];
8101 interrupt_clear_down(dd, 0, eri);
8102 } else if (source == 3 /* dc_lbm_int */) {
8104 * This indicates that a parity error has occurred on the
8105 * address/control lines presented to the LBM. The error
8106 * is a single pulse, there is no associated error flag,
8107 * and it is non-maskable. This is because if a parity
8108 * error occurs on the request the request is dropped.
8109 * This should never occur, but it is nice to know if it ever does.
8112 dd_dev_err(dd, "Parity error in DC LBM block\n");
8114 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8119 * TX block send credit interrupt. Source is < 160.
8121 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8123 sc_group_release_update(dd, source);
8127 * TX block SDMA interrupt. Source is < 48.
8129 * SDMA interrupts are grouped by type:
8131 * 0 - N-1 = SDma
8132 * N - 2N-1 = SDmaProgress
8133 * 2N - 3N-1 = SDmaIdle
8135 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8137 /* what interrupt */
8138 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
8140 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8142 #ifdef CONFIG_SDMA_VERBOSITY
8143 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8144 slashstrip(__FILE__), __LINE__, __func__);
8145 sdma_dumpstate(&dd->per_sdma[which]);
8148 if (likely(what < 3 && which < dd->num_sdma)) {
8149 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8151 /* should not happen */
8152 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
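/*
 * Illustrative sketch, not part of the driver: with N engines, the single
 * SDMA source number encodes type and engine as source = type * N + engine.
 * Assuming TXE_NUM_SDMA_ENGINES is 16, source 35 decodes to what = 2
 * (SDmaIdle) on engine 3.
 */
#if 0	/* sketch only; not built with the driver */
#define SK_NUM_ENGINES 16	/* assumed value of TXE_NUM_SDMA_ENGINES */

static void sketch_decode_sdma(unsigned int source,
			       unsigned int *what, unsigned int *which)
{
	*what = source / SK_NUM_ENGINES;	/* 0 int, 1 progress, 2 idle */
	*which = source % SK_NUM_ENGINES;	/* engine index */
}
#endif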
8157 * is_rcv_avail_int() - User receive context available IRQ handler
8159 * @source: logical IRQ source (offset from IS_RCVAVAIL_START)
8161 * RX block receive available interrupt. Source is < 160.
8163 * This is the general interrupt handler for user (PSM) receive contexts,
8164 * and can only be used for non-threaded IRQs.
8166 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8168 struct hfi1_ctxtdata *rcd;
8171 if (likely(source < dd->num_rcv_contexts)) {
8172 rcd = hfi1_rcd_get_by_index(dd, source);
8174 handle_user_interrupt(rcd);
8178 /* received an interrupt, but no rcd */
8179 err_detail = "dataless";
8181 /* received an interrupt, but are not using that context */
8182 err_detail = "out of range";
8184 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8185 err_detail, source);
8189 * is_rcv_urgent_int() - User receive context urgent IRQ handler
8191 * @source: logical IRQ source (offset from IS_RCVURGENT_START)
8193 * RX block receive urgent interrupt. Source is < 160.
8195 * NOTE: kernel receive contexts specifically do NOT enable this IRQ.
8197 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8199 struct hfi1_ctxtdata *rcd;
8202 if (likely(source < dd->num_rcv_contexts)) {
8203 rcd = hfi1_rcd_get_by_index(dd, source);
8205 handle_user_interrupt(rcd);
8209 /* received an interrupt, but no rcd */
8210 err_detail = "dataless";
8212 /* received an interrupt, but are not using that context */
8213 err_detail = "out of range";
8215 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8216 err_detail, source);
8220 * Reserved range interrupt. Should not be called in normal operation.
8222 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8226 dd_dev_err(dd, "unexpected %s interrupt\n",
8227 is_reserved_name(name, sizeof(name), source));
8230 static const struct is_table is_table[] = {
8232 * start end
8233 * name func interrupt func
8235 { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8236 is_misc_err_name, is_misc_err_int },
8237 { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8238 is_sdma_eng_err_name, is_sdma_eng_err_int },
8239 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8240 is_sendctxt_err_name, is_sendctxt_err_int },
8241 { IS_SDMA_START, IS_SDMA_IDLE_END,
8242 is_sdma_eng_name, is_sdma_eng_int },
8243 { IS_VARIOUS_START, IS_VARIOUS_END,
8244 is_various_name, is_various_int },
8245 { IS_DC_START, IS_DC_END,
8246 is_dc_name, is_dc_int },
8247 { IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8248 is_rcv_avail_name, is_rcv_avail_int },
8249 { IS_RCVURGENT_START, IS_RCVURGENT_END,
8250 is_rcv_urgent_name, is_rcv_urgent_int },
8251 { IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8252 is_send_credit_name, is_send_credit_int},
8253 { IS_RESERVED_START, IS_RESERVED_END,
8254 is_reserved_name, is_reserved_int},
8258 * Interrupt source interrupt - called when the given source has an interrupt.
8259 * Source is a bit index into an array of 64-bit integers.
8261 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8263 const struct is_table *entry;
8265 /* avoids a double compare by walking the table in-order */
8266 for (entry = &is_table[0]; entry->is_name; entry++) {
8267 if (source <= entry->end) {
8268 trace_hfi1_interrupt(dd, entry, source);
8269 entry->is_int(dd, source - entry->start);
8273 /* fell off the end */
8274 dd_dev_err(dd, "invalid interrupt source %u\n", source);
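/*
 * Illustrative sketch, not part of the driver: because is_table[] is
 * sorted and its ranges are contiguous, a single "source <= end" compare
 * per row finds the owner; the handler then sees the source renumbered
 * relative to its range start. The sketch_* types are hypothetical.
 */
#if 0	/* sketch only; not built with the driver */
struct sketch_range {
	unsigned int start, end;
	void (*fn)(unsigned int offset);
};

static void sketch_dispatch(const struct sketch_range *tbl, unsigned int n,
			    unsigned int source)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (source <= tbl[i].end) {
			tbl[i].fn(source - tbl[i].start);
			return;
		}
	}
	/* fell off the end: invalid source */
}
#endif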
8278 * general_interrupt - General interrupt handler
8279 * @irq: MSIx IRQ vector
8280 * @data: hfi1 devdata
8282 * This is able to correctly handle all non-threaded interrupts. Receive
8283 * context DATA IRQs are threaded and are not supported by this handler.
8286 irqreturn_t general_interrupt(int irq, void *data)
8288 struct hfi1_devdata *dd = data;
8289 u64 regs[CCE_NUM_INT_CSRS];
8292 irqreturn_t handled = IRQ_NONE;
8294 this_cpu_inc(*dd->int_counter);
8296 /* phase 1: scan and clear all handled interrupts */
8297 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8298 if (dd->gi_mask[i] == 0) {
8299 regs[i] = 0; /* used later */
8302 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8304 /* only clear if anything is set */
8306 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8309 /* phase 2: call the appropriate handler */
8310 for_each_set_bit(bit, (unsigned long *)&regs[0],
8311 CCE_NUM_INT_CSRS * 64) {
8312 is_interrupt(dd, bit);
8313 handled = IRQ_HANDLED;
8319 irqreturn_t sdma_interrupt(int irq, void *data)
8321 struct sdma_engine *sde = data;
8322 struct hfi1_devdata *dd = sde->dd;
8325 #ifdef CONFIG_SDMA_VERBOSITY
8326 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8327 slashstrip(__FILE__), __LINE__, __func__);
8328 sdma_dumpstate(sde);
8331 this_cpu_inc(*dd->int_counter);
8333 /* This read_csr is really bad in the hot path */
8334 status = read_csr(dd,
8335 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8337 if (likely(status)) {
8338 /* clear the interrupt(s) */
8340 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8343 /* handle the interrupt(s) */
8344 sdma_engine_interrupt(sde, status);
8346 dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
8353 * Clear the receive interrupt. Use a read of the interrupt clear CSR
8354 * to ensure that the write completed. This does NOT guarantee that
8355 * queued DMA writes to memory from the chip are pushed.
8357 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8359 struct hfi1_devdata *dd = rcd->dd;
8360 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8362 write_csr(dd, addr, rcd->imask);
8363 /* force the above write on the chip and get a value back */
8364 (void)read_csr(dd, addr);
8367 /* force the receive interrupt */
8368 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8370 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8374 * Return non-zero if a packet is present.
8376 * This routine is called when rechecking for packets after the RcvAvail
8377 * interrupt has been cleared down. First, do a quick check of memory for
8378 * a packet present. If not found, use an expensive CSR read of the context
8379 * tail to determine the actual tail. The CSR read is necessary because there
8380 * is no method to push pending DMAs to memory other than an interrupt and we
8381 * are trying to determine if we need to force an interrupt.
8383 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8387 if (hfi1_packet_present(rcd))
8390 /* fall back to a CSR read, correct independent of DMA_RTAIL */
8391 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8392 return hfi1_rcd_head(rcd) != tail;
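/*
 * Illustrative sketch, not part of the driver: the recheck above is
 * ordered by cost -- the DMA'd in-memory hint first, one CSR read of the
 * hardware tail only on a miss -- and ends in the usual ring-buffer
 * emptiness test. The sketch_* names are hypothetical.
 */
#if 0	/* sketch only; not built with the driver */
static int sketch_packet_present(u32 sw_head, int mem_hint,
				 u32 (*read_hw_tail)(void))
{
	if (mem_hint)			/* fast path: memory says yes */
		return 1;
	return sw_head != read_hw_tail();	/* ring non-empty test */
}
#endif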
8396 * Common code for receive contexts interrupt handlers.
8397 * Update traces, increment kernel IRQ counter and
8398 * setup ASPM when needed.
8400 static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
8402 struct hfi1_devdata *dd = rcd->dd;
8404 trace_hfi1_receive_interrupt(dd, rcd);
8405 this_cpu_inc(*dd->int_counter);
8406 aspm_ctx_disable(rcd);
8410 * __hfi1_rcd_eoi_intr() - Make HW issue receive interrupt
8411 * when there are packets present in the queue. When calling
8412 * with interrupts enabled please use hfi1_rcd_eoi_intr.
8414 * @rcd: valid receive context
8416 static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
8420 clear_recv_intr(rcd);
8421 if (check_packet_present(rcd))
8422 force_recv_intr(rcd);
8426 * hfi1_rcd_eoi_intr() - End of Interrupt processing action
8428 * @rcd: Ptr to hfi1_ctxtdata of receive context
8430 * Hold IRQs so we can safely clear the interrupt and
8431 * recheck for a packet that may have arrived after the previous
8432 * check and the interrupt clear. If a packet arrived, force another
8433 * interrupt. This routine can be called at the end of receive packet
8434 * processing in interrupt service routines, interrupt service threads, and softirqs.
8437 static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
8439 unsigned long flags;
8441 local_irq_save(flags);
8442 __hfi1_rcd_eoi_intr(rcd);
8443 local_irq_restore(flags);
8447 * hfi1_netdev_rx_napi - napi poll function to move eoi inline
8448 * @napi: pointer to napi object
8449 * @budget: netdev budget
8451 int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget)
8453 struct hfi1_netdev_rxq *rxq = container_of(napi,
8454 struct hfi1_netdev_rxq, napi);
8455 struct hfi1_ctxtdata *rcd = rxq->rcd;
8458 work_done = rcd->do_interrupt(rcd, budget);
8460 if (work_done < budget) {
8461 napi_complete_done(napi, work_done);
8462 hfi1_rcd_eoi_intr(rcd);
8468 /* Receive packet napi handler for netdevs VNIC and AIP */
8469 irqreturn_t receive_context_interrupt_napi(int irq, void *data)
8471 struct hfi1_ctxtdata *rcd = data;
8473 receive_interrupt_common(rcd);
8475 if (likely(rcd->napi)) {
8476 if (likely(napi_schedule_prep(rcd->napi)))
8477 __napi_schedule_irqoff(rcd->napi);
8479 __hfi1_rcd_eoi_intr(rcd);
8481 WARN_ONCE(1, "Napi IRQ handler without napi set up ctxt=%d\n",
8483 __hfi1_rcd_eoi_intr(rcd);
8490 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8491 * This routine will try to handle packets immediately (latency), but if
8492 * it finds too many, it will invoke the thread handler (bandwidth). The
8493 * chip receive interrupt is *not* cleared down until this or the thread (if
8494 * invoked) is finished. The intent is to avoid extra interrupts while we
8495 * are processing packets anyway.
8497 irqreturn_t receive_context_interrupt(int irq, void *data)
8499 struct hfi1_ctxtdata *rcd = data;
8502 receive_interrupt_common(rcd);
8504 /* receive interrupt remains blocked while processing packets */
8505 disposition = rcd->do_interrupt(rcd, 0);
8508 * Too many packets were seen while processing packets in this
8509 * IRQ handler. Invoke the handler thread. The receive interrupt
8512 if (disposition == RCV_PKT_LIMIT)
8513 return IRQ_WAKE_THREAD;
8515 __hfi1_rcd_eoi_intr(rcd);
8520 * Receive packet thread handler. This expects to be invoked with the
8521 * receive interrupt still blocked.
8523 irqreturn_t receive_context_thread(int irq, void *data)
8525 struct hfi1_ctxtdata *rcd = data;
8527 /* receive interrupt is still blocked from the IRQ handler */
8528 (void)rcd->do_interrupt(rcd, 1);
8530 hfi1_rcd_eoi_intr(rcd);
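/*
 * Illustrative sketch, not part of the driver: receive_context_interrupt()
 * and receive_context_thread() are the two halves of a threaded IRQ -- the
 * hard handler returns IRQ_WAKE_THREAD under load and the thread finishes
 * the work with the chip interrupt still blocked. The actual registration
 * lives elsewhere in the driver; the name and flags below are assumptions.
 */
#if 0	/* sketch only; not built with the driver */
static int sketch_request_rcd_irq(int irq, struct hfi1_ctxtdata *rcd)
{
	return request_threaded_irq(irq, receive_context_interrupt,
				    receive_context_thread, 0,
				    "hfi1-rcd", rcd);
}
#endif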
8535 /* ========================================================================= */
8537 u32 read_physical_state(struct hfi1_devdata *dd)
8541 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8542 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8543 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8546 u32 read_logical_state(struct hfi1_devdata *dd)
8550 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8551 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8552 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8555 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8559 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8560 /* clear current state, set new state */
8561 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8562 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8563 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8567 * Use the 8051 to read a LCB CSR.
8569 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8574 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8575 if (acquire_lcb_access(dd, 0) == 0) {
8576 *data = read_csr(dd, addr);
8577 release_lcb_access(dd, 0);
8583 /* register is an index of LCB registers: (offset - base) / 8 */
8584 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8585 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8586 if (ret != HCMD_SUCCESS)
8592 * Provide a cache for some of the LCB registers in case the LCB is
8594 * (The LCB is unavailable in certain link states, for example.)
8601 static struct lcb_datum lcb_cache[] = {
8602 { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8603 { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8604 { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8607 static void update_lcb_cache(struct hfi1_devdata *dd)
8613 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8614 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8616 /* Update if we get good data */
8617 if (likely(ret != -EBUSY))
8618 lcb_cache[i].val = val;
8622 static int read_lcb_cache(u32 off, u64 *val)
8626 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8627 if (lcb_cache[i].off == off) {
8628 *val = lcb_cache[i].val;
8633 pr_warn("%s bad offset 0x%x\n", __func__, off);
8638 * Read an LCB CSR. Access may not be in host control, so check.
8639 * Return 0 on success, -EBUSY on failure.
8641 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8643 struct hfi1_pportdata *ppd = dd->pport;
8645 /* if up, go through the 8051 for the value */
8646 if (ppd->host_link_state & HLS_UP)
8647 return read_lcb_via_8051(dd, addr, data);
8648 /* if going up or down, check the cache, otherwise, no access */
8649 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8650 if (read_lcb_cache(addr, data))
8655 /* otherwise, host has access */
8656 *data = read_csr(dd, addr);
8661 * Use the 8051 to write a LCB CSR.
8663 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8668 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8669 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8670 if (acquire_lcb_access(dd, 0) == 0) {
8671 write_csr(dd, addr, data);
8672 release_lcb_access(dd, 0);
8678 /* register is an index of LCB registers: (offset - base) / 8 */
8679 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8680 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8681 if (ret != HCMD_SUCCESS)
8687 * Write an LCB CSR. Access may not be in host control, so check.
8688 * Return 0 on success, -EBUSY on failure.
8690 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8692 struct hfi1_pportdata *ppd = dd->pport;
8694 /* if up, go through the 8051 for the value */
8695 if (ppd->host_link_state & HLS_UP)
8696 return write_lcb_via_8051(dd, addr, data);
8697 /* if going up or down, no access */
8698 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8700 /* otherwise, host has access */
8701 write_csr(dd, addr, data);
8707 * < 0 = Linux error, not able to get access
8708 * > 0 = 8051 command RETURN_CODE
8710 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
8715 unsigned long timeout;
8717 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8719 mutex_lock(&dd->dc8051_lock);
8721 /* We can't send any commands to the 8051 if it's in reset */
8722 if (dd->dc_shutdown) {
8723 return_code = -ENODEV;
8728 * If an 8051 host command timed out previously, then the 8051 is stuck.
8731 * On first timeout, attempt to reset and restart the entire DC
8732 * block (including 8051). (Is this too big of a hammer?)
8734 * If the 8051 times out a second time, the reset did not bring it
8735 * back to healthy life. In that case, fail any subsequent commands.
8737 if (dd->dc8051_timed_out) {
8738 if (dd->dc8051_timed_out > 1) {
8740 "Previous 8051 host command timed out, skipping command %u\n",
8742 return_code = -ENXIO;
8750 * If there is no timeout, then the 8051 command interface is
8751 * waiting for a command.
8755 * When writing a LCB CSR, out_data contains the full value to
8756 * be written, while in_data contains the relative LCB
8757 * address in 7:0. Do the work here, rather than the caller,
8758 * of distributing the write data to where it needs to go:
8761 * 39:00 -> in_data[47:8]
8762 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8763 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8765 if (type == HCMD_WRITE_LCB_CSR) {
8766 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8767 /* must preserve COMPLETED - it is tied to hardware */
8768 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8769 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8770 reg |= ((((*out_data) >> 40) & 0xff) <<
8771 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8772 | ((((*out_data) >> 48) & 0xffff) <<
8773 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8774 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8778 * Do two writes: the first to stabilize the type and req_data, the
8779 * second to activate.
8781 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8782 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8783 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8784 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8785 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8786 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8787 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8789 /* wait for completion, alternate: interrupt */
8790 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8792 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8793 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8796 if (time_after(jiffies, timeout)) {
8797 dd->dc8051_timed_out++;
8798 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8801 return_code = -ETIMEDOUT;
8808 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8809 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8810 if (type == HCMD_READ_LCB_CSR) {
8811 /* top 16 bits are in a different register */
8812 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8813 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8815 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8818 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8819 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8820 dd->dc8051_timed_out = 0;
8822 * Clear command for next user.
8824 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8827 mutex_unlock(&dd->dc8051_lock);
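/*
 * Illustrative sketch, not part of the driver: the 8051 mailbox above is a
 * doorbell handshake -- write the command with REQ_NEW clear so the fields
 * are stable, set REQ_NEW in a second write, then poll COMPLETED under a
 * timeout. The sketch_* helpers, SK_* bits, and the 100 ms budget are all
 * assumptions.
 */
#if 0	/* sketch only; not built with the driver */
#define SK_REQ_NEW	(1ull << 31)	/* assumed doorbell bit */
#define SK_COMPLETED	(1ull << 0)	/* assumed status bit */

static int sketch_doorbell_cmd(u64 payload)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	sketch_write_cmd(payload);		/* 1: stabilize the fields */
	sketch_write_cmd(payload | SK_REQ_NEW);	/* 2: ring the doorbell */

	while (!(sketch_read_status() & SK_COMPLETED)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;	/* 3: bounded wait */
		udelay(2);
	}
	return 0;
}
#endif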
8831 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8833 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8836 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8837 u8 lane_id, u32 config_data)
8842 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8843 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8844 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8845 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8846 if (ret != HCMD_SUCCESS) {
8848 "load 8051 config: field id %d, lane %d, err %d\n",
8849 (int)field_id, (int)lane_id, ret);
8855 * Read the 8051 firmware "registers". Use the RAM directly. Always
8856 * set the result, even on error.
8857 * Return 0 on success, -errno on failure
8859 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8866 /* address start depends on the lane_id */
8867 if (lane_id < 4)
8868 addr = (4 * NUM_GENERAL_FIELDS)
8869 + (lane_id * 4 * NUM_LANE_FIELDS);
8870 else
8871 addr = 0;
8872 addr += field_id * 4;
8874 /* read is in 8-byte chunks, hardware will truncate the address down */
8875 ret = read_8051_data(dd, addr, 8, &big_data);
8878 /* extract the 4 bytes we want */
8879 if (addr & 0x4)
8880 *result = (u32)(big_data >> 32);
8881 else
8882 *result = (u32)big_data;
8885 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8886 __func__, lane_id, field_id);
8892 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8897 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8898 | power_management << POWER_MANAGEMENT_SHIFT;
8899 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8900 GENERAL_CONFIG, frame);
8903 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8904 u16 vl15buf, u8 crc_sizes)
8908 frame = (u32)vau << VAU_SHIFT
8910 | (u32)vcu << VCU_SHIFT
8911 | (u32)vl15buf << VL15BUF_SHIFT
8912 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8913 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8914 GENERAL_CONFIG, frame);
8917 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
8918 u8 *flag_bits, u16 *link_widths)
8922 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8924 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8925 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8926 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8929 static int write_vc_local_link_mode(struct hfi1_devdata *dd,
8936 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8937 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8938 | (u32)link_widths << LINK_WIDTH_SHIFT;
8939 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8943 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8948 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8949 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8950 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8953 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8958 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8959 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8960 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8961 & REMOTE_DEVICE_REV_MASK;
8964 int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8969 mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8970 read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8971 /* Clear, then set field */
8972 frame &= ~mask;
8973 frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8974 return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8978 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8983 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8984 *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8985 STS_FM_VERSION_MAJOR_MASK;
8986 *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8987 STS_FM_VERSION_MINOR_MASK;
8989 read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8990 *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8991 STS_FM_VERSION_PATCH_MASK;
8994 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8999 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
9000 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
9001 & POWER_MANAGEMENT_MASK;
9002 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
9003 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
9006 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
9007 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
9011 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
9012 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
9013 *z = (frame >> Z_SHIFT) & Z_MASK;
9014 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
9015 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
9016 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
9019 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
9025 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
9027 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
9028 & REMOTE_TX_RATE_MASK;
9029 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
9032 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
9036 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
9037 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
9040 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
9042 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
9045 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
9047 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
9050 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
9056 if (dd->pport->host_link_state & HLS_UP) {
9057 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
9060 *link_quality = (frame >> LINK_QUALITY_SHIFT)
9061 & LINK_QUALITY_MASK;
9065 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
9069 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
9070 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
9073 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
9077 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
9078 *ldr = (frame & 0xff);
9081 static int read_tx_settings(struct hfi1_devdata *dd,
9083 u8 *tx_polarity_inversion,
9084 u8 *rx_polarity_inversion,
9090 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
9091 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
9092 & ENABLE_LANE_TX_MASK;
9093 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
9094 & TX_POLARITY_INVERSION_MASK;
9095 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
9096 & RX_POLARITY_INVERSION_MASK;
9097 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
9101 static int write_tx_settings(struct hfi1_devdata *dd,
9103 u8 tx_polarity_inversion,
9104 u8 rx_polarity_inversion,
9109 /* no need to mask, all variable sizes match field widths */
9110 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9111 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9112 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9113 | max_rate << MAX_RATE_SHIFT;
9114 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9118 * Read an idle LCB message.
9120 * Returns 0 on success, -EINVAL on error
9122 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9126 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
9127 if (ret != HCMD_SUCCESS) {
9128 dd_dev_err(dd, "read idle message: type %d, err %d\n",
9132 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9133 /* return only the payload as we already know the type */
9134 *data_out >>= IDLE_PAYLOAD_SHIFT;
9139 * Read an idle SMA message. To be done in response to a notification from the 8051.
9142 * Returns 0 on success, -EINVAL on error
9144 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9146 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9151 * Send an idle LCB message.
9153 * Returns 0 on success, -EINVAL on error
9155 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9159 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9160 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9161 if (ret != HCMD_SUCCESS) {
9162 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9170 * Send an idle SMA message.
9172 * Returns 0 on success, -EINVAL on error
9174 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9178 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9179 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9180 return send_idle_message(dd, data);
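/*
 * Illustrative sketch, not part of the driver: an idle LCB message is a
 * (type, payload) pair packed into one u64 -- payload in the low field,
 * type above it -- using the same shift/mask names as the code above.
 * send_idle_sma() packs; read_idle_message() strips the type back off with
 * the >>= IDLE_PAYLOAD_SHIFT seen earlier.
 */
#if 0	/* sketch only; not built with the driver */
static u64 sketch_pack_idle_sma(u64 message)
{
	return ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
	       ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
}
#endif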
9184 * Initialize the LCB then do a quick link up. This may or may not be in loopback.
9187 * return 0 on success, -errno on error
9189 static int do_quick_linkup(struct hfi1_devdata *dd)
9193 lcb_shutdown(dd, 0);
9196 /* LCB_CFG_LOOPBACK.VAL = 2 */
9197 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9198 write_csr(dd, DC_LCB_CFG_LOOPBACK,
9199 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9200 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9203 /* start the LCBs */
9204 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9205 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9207 /* simulator only loopback steps */
9208 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9209 /* LCB_CFG_RUN.EN = 1 */
9210 write_csr(dd, DC_LCB_CFG_RUN,
9211 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9213 ret = wait_link_transfer_active(dd, 10);
9217 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9218 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9223 * When doing quick linkup and not in loopback, both
9224 * sides must be done with LCB set-up before either
9225 * starts the quick linkup. Put a delay here so that
9226 * both sides can be started and have a chance to be
9227 * done with LCB set up before resuming.
9230 "Pausing for peer to be finished with LCB set up\n");
9232 dd_dev_err(dd, "Continuing with quick linkup\n");
9235 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9236 set_8051_lcb_access(dd);
9239 * State "quick" LinkUp request sets the physical link state to
9240 * LinkUp without a verify capability sequence.
9241 * This state is in simulator v37 and later.
9243 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9244 if (ret != HCMD_SUCCESS) {
9246 "%s: set physical link state to quick LinkUp failed with return %d\n",
9249 set_host_lcb_access(dd);
9250 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9257 return 0; /* success */
9261 * Do all special steps to set up loopback.
9263 static int init_loopback(struct hfi1_devdata *dd)
9265 dd_dev_info(dd, "Entering loopback mode\n");
9267 /* all loopbacks should disable self GUID check */
9268 write_csr(dd, DC_DC8051_CFG_MODE,
9269 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9272 * The simulator has only one loopback option - LCB. Switch
9273 * to that option, which includes quick link up.
9275 * Accept all valid loopback values.
9277 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9278 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9279 loopback == LOOPBACK_CABLE)) {
9280 loopback = LOOPBACK_LCB;
9286 * SerDes loopback init sequence is handled in set_local_link_attributes
9288 if (loopback == LOOPBACK_SERDES)
9291 /* LCB loopback - handled at poll time */
9292 if (loopback == LOOPBACK_LCB) {
9293 quick_linkup = 1; /* LCB is always quick linkup */
9295 /* not supported in emulation due to emulation RTL changes */
9296 if (dd->icode == ICODE_FPGA_EMULATION) {
9298 "LCB loopback not supported in emulation\n");
9304 /* external cable loopback requires no extra steps */
9305 if (loopback == LOOPBACK_CABLE)
9308 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9313 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9314 * used in the Verify Capability link width attribute.
9316 static u16 opa_to_vc_link_widths(u16 opa_widths)
9321 static const struct link_bits {
9324 } opa_link_xlate[] = {
9325 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9326 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9327 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9328 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
9331 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9332 if (opa_widths & opa_link_xlate[i].from)
9333 result |= opa_link_xlate[i].to;
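/*
 * Worked example: the table maps lane count n to bit n-1, so an enabled
 * OPA width set translates bitwise, e.g.
 *
 *   opa_to_vc_link_widths(OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X)
 *	== (1 << 0) | (1 << 3) == 0x9
 */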
9339 * Set link attributes before moving to polling.
9341 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9343 struct hfi1_devdata *dd = ppd->dd;
9345 u8 tx_polarity_inversion;
9346 u8 rx_polarity_inversion;
9349 /* reset our fabric serdes to clear any lingering problems */
9350 fabric_serdes_reset(dd);
9352 /* set the local tx rate - need to read-modify-write */
9353 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9354 &rx_polarity_inversion, &ppd->local_tx_rate);
9356 goto set_local_link_attributes_fail;
9358 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9359 /* set the tx rate to the fastest enabled */
9360 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9361 ppd->local_tx_rate = 1;
9363 ppd->local_tx_rate = 0;
9365 /* set the tx rate to all enabled */
9366 ppd->local_tx_rate = 0;
9367 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9368 ppd->local_tx_rate |= 2;
9369 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9370 ppd->local_tx_rate |= 1;
9373 enable_lane_tx = 0xF; /* enable all four lanes */
9374 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9375 rx_polarity_inversion, ppd->local_tx_rate);
9376 if (ret != HCMD_SUCCESS)
9377 goto set_local_link_attributes_fail;
9379 ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
9380 if (ret != HCMD_SUCCESS) {
9382 "Failed to set host interface version, return 0x%x\n",
9384 goto set_local_link_attributes_fail;
9388 * DC supports continuous updates.
9390 ret = write_vc_local_phy(dd,
9391 0 /* no power management */,
9392 1 /* continuous updates */);
9393 if (ret != HCMD_SUCCESS)
9394 goto set_local_link_attributes_fail;
9396 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9397 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9398 ppd->port_crc_mode_enabled);
9399 if (ret != HCMD_SUCCESS)
9400 goto set_local_link_attributes_fail;
9403 * SerDes loopback init sequence requires
9404 * setting bit 0 of MISC_CONFIG_BITS
9406 if (loopback == LOOPBACK_SERDES)
9407 misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
9410 * An external device configuration request is used to reset the LCB
9411 * to retry to obtain operational lanes when the first attempt is unsuccessful.
9414 if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
9415 misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;
9417 ret = write_vc_local_link_mode(dd, misc_bits, 0,
9418 opa_to_vc_link_widths(
9419 ppd->link_width_enabled));
9420 if (ret != HCMD_SUCCESS)
9421 goto set_local_link_attributes_fail;
9423 /* let peer know who we are */
9424 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9425 if (ret == HCMD_SUCCESS)
9428 set_local_link_attributes_fail:
9430 "Failed to set local link attributes, return 0x%x\n",
9436 * Call this to start the link.
9437 * Do not do anything if the link is disabled.
9438 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9440 int start_link(struct hfi1_pportdata *ppd)
9443 * Tune the SerDes to a ballpark setting for optimal signal and bit
9444 * error rate. Needs to be done before starting the link.
9448 if (!ppd->driver_link_ready) {
9449 dd_dev_info(ppd->dd,
9450 "%s: stopping link start because driver is not ready\n",
9456 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9457 * pkey table can be configured properly if the HFI unit is connected
9458 * to switch port with MgmtAllowed=NO
9460 clear_full_mgmt_pkey(ppd);
9462 return set_link_state(ppd, HLS_DN_POLL);
9465 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9467 struct hfi1_devdata *dd = ppd->dd;
9469 unsigned long timeout;
9472 * Some QSFP cables have a quirk that asserts the IntN line as a side
9473 * effect of power up on plug-in. We ignore this false positive
9474 * interrupt until the module has finished powering up by waiting for
9475 * a minimum timeout of the module inrush initialization time of
9476 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9477 * module have stabilized.
9482 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9484 timeout = jiffies + msecs_to_jiffies(2000);
9486 mask = read_csr(dd, dd->hfi1_id ?
9487 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9488 if (!(mask & QSFP_HFI0_INT_N))
9490 if (time_after(jiffies, timeout)) {
9491 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9499 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9501 struct hfi1_devdata *dd = ppd->dd;
9504 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9507 * Clear the status register to avoid an immediate interrupt
9508 * when we re-enable the IntN pin
9510 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9512 mask |= (u64)QSFP_HFI0_INT_N;
9514 mask &= ~(u64)QSFP_HFI0_INT_N;
9516 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9519 int reset_qsfp(struct hfi1_pportdata *ppd)
9521 struct hfi1_devdata *dd = ppd->dd;
9522 u64 mask, qsfp_mask;
9524 /* Disable INT_N from triggering QSFP interrupts */
9525 set_qsfp_int_n(ppd, 0);
9527 /* Reset the QSFP */
9528 mask = (u64)QSFP_HFI0_RESET_N;
9530 qsfp_mask = read_csr(dd,
9531 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9534 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9540 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9542 wait_for_qsfp_init(ppd);
9545 * Allow INT_N to trigger the QSFP interrupt to watch
9546 * for alarms and warnings
9548 set_qsfp_int_n(ppd, 1);
9551 * After the reset, AOC transmitters are enabled by default. They need
9552 * to be turned off to complete the QSFP setup before they can be
9555 return set_qsfp_tx(ppd, 0);
9558 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9559 u8 *qsfp_interrupt_status)
9561 struct hfi1_devdata *dd = ppd->dd;
9563 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9564 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9565 dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9568 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9569 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9570 dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9574 * The remaining alarms/warnings don't matter if the link is down.
9576 if (ppd->host_link_state & HLS_DOWN)
9579 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9580 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9581 dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9584 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9585 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9586 dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9589 /* Byte 2 is vendor specific */
9591 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9592 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9593 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9596 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9597 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9598 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9601 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9602 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9603 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9606 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9607 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9608 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9611 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9612 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9613 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9616 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9617 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9618 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9621 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9622 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9623 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9626 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9627 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9628 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9631 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9632 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9633 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9636 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9637 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9638 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9641 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9642 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9643 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9646 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9647 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9648 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9651 /* Bytes 9-10 and 11-12 are reserved */
9652 /* Bytes 13-15 are vendor specific */
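/*
 * Editor's sketch (not driver code): the chain of byte/flag tests above can
 * also be written table-driven. The byte offsets and bit values below are
 * hypothetical placeholders, not the SFF-8636 definitions.
 */
#include <stdint.h>
#include <stdio.h>

struct example_qsfp_alarm {
	uint8_t byte;		/* index into the status buffer */
	uint8_t bits;		/* alarm | warning bits to test */
	const char *msg;	/* message to emit when any bit is set */
};

static const struct example_qsfp_alarm example_alarms[] = {
	{ 0, 0xc0, "QSFP cable temperature too high" },
	{ 0, 0x30, "QSFP cable temperature too low" },
	{ 1, 0xc0, "QSFP supply voltage too high" },
	{ 1, 0x30, "QSFP supply voltage too low" },
};

static void example_report_alarms(const uint8_t *status)
{
	size_t i;

	for (i = 0; i < sizeof(example_alarms) / sizeof(example_alarms[0]); i++)
		if (status[example_alarms[i].byte] & example_alarms[i].bits)
			printf("%s\n", example_alarms[i].msg);
}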
9657 /* This routine is only scheduled if the QSFP module-present signal is asserted */
9658 void qsfp_event(struct work_struct *work)
9660 struct qsfp_data *qd;
9661 struct hfi1_pportdata *ppd;
9662 struct hfi1_devdata *dd;
9664 qd = container_of(work, struct qsfp_data, qsfp_work);
9669 if (!qsfp_mod_present(ppd))
9672 if (ppd->host_link_state == HLS_DN_DISABLE) {
9673 dd_dev_info(ppd->dd,
9674 "%s: stopping link start because link is disabled\n",
9680 * Turn DC back on after cable has been re-inserted. Up until
9681 * now, the DC has been in reset to save power.
9685 if (qd->cache_refresh_required) {
9686 set_qsfp_int_n(ppd, 0);
9688 wait_for_qsfp_init(ppd);
9691 * Allow INT_N to trigger the QSFP interrupt to watch
9692 * for alarms and warnings
9694 set_qsfp_int_n(ppd, 1);
9699 if (qd->check_interrupt_flags) {
9700 u8 qsfp_interrupt_status[16] = {0,};
9702 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9703 &qsfp_interrupt_status[0], 16) != 16) {
9705 "%s: Failed to read status of QSFP module\n",
9708 unsigned long flags;
9710 handle_qsfp_error_conditions(
9711 ppd, qsfp_interrupt_status);
9712 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9713 ppd->qsfp_info.check_interrupt_flags = 0;
9714 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9720 void init_qsfp_int(struct hfi1_devdata *dd)
9722 struct hfi1_pportdata *ppd = dd->pport;
9725 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9726 /* Clear current status to avoid spurious interrupts */
9727 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9729 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9732 set_qsfp_int_n(ppd, 0);
9734 /* Handle active low nature of INT_N and MODPRST_N pins */
9735 if (qsfp_mod_present(ppd))
9736 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9738 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9741 /* Enable the appropriate QSFP IRQ source */
9743 set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
9745 set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
9749 * Do a one-time initialize of the LCB block.
9751 static void init_lcb(struct hfi1_devdata *dd)
9753 /* simulator does not correctly handle LCB cclk loopback, skip */
9754 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9757 /* the DC has been reset earlier in the driver load */
9759 /* set LCB for cclk loopback on the port */
9760 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9761 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9762 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9763 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9764 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9765 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9766 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9770 * Perform a test read on the QSFP. Return 0 on success, -ERRNO
9773 static int test_qsfp_read(struct hfi1_pportdata *ppd)
9779 * Report success if this is not a QSFP, or if it is a QSFP but the cable is
9782 if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
9785 /* read byte 2, the status byte */
9786 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9792 return 0; /* success */
9796 * Values for QSFP retry.
9798 * Give up after 10s (20 x 500ms). The overall timeout was empirically
9799 * arrived at from experience on a large cluster.
9801 #define MAX_QSFP_RETRIES 20
9802 #define QSFP_RETRY_WAIT 500 /* msec */
9805 * Try a QSFP read. If it fails, schedule a retry for later.
9806 * Called on first link activation after driver load.
9808 static void try_start_link(struct hfi1_pportdata *ppd)
9810 if (test_qsfp_read(ppd)) {
9812 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9813 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9816 dd_dev_info(ppd->dd,
9817 "QSFP not responding, waiting and retrying %d\n",
9818 (int)ppd->qsfp_retry_count);
9819 ppd->qsfp_retry_count++;
9820 queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
9821 msecs_to_jiffies(QSFP_RETRY_WAIT));
9824 ppd->qsfp_retry_count = 0;
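/*
 * Editor's sketch: the policy above is "bounded retries with a fixed delay".
 * Standalone illustration with hypothetical names; the real driver defers
 * each retry through queue_delayed_work() rather than sleeping inline.
 */
#include <stdbool.h>

#define EXAMPLE_MAX_RETRIES 20
#define EXAMPLE_WAIT_MS 500

static bool example_try_with_retry(bool (*try_fn)(void),
				   void (*sleep_ms)(unsigned int))
{
	unsigned int attempt;

	for (attempt = 0; attempt <= EXAMPLE_MAX_RETRIES; attempt++) {
		if (try_fn())
			return true;	/* QSFP answered */
		sleep_ms(EXAMPLE_WAIT_MS);
	}
	return false;			/* give up, as the driver does */
}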
9830 * Workqueue function to start the link after a delay.
9832 void handle_start_link(struct work_struct *work)
9834 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9835 start_link_work.work);
9836 try_start_link(ppd);
9839 int bringup_serdes(struct hfi1_pportdata *ppd)
9841 struct hfi1_devdata *dd = ppd->dd;
9845 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9846 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9848 guid = ppd->guids[HFI1_PORT_GUID_INDEX];
9851 guid = dd->base_guid + ppd->port - 1;
9852 ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
9855 /* Set linkinit_reason on power up per OPA spec */
9856 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9858 /* one-time init of the LCB */
9862 ret = init_loopback(dd);
9868 if (ppd->port_type == PORT_TYPE_QSFP) {
9869 set_qsfp_int_n(ppd, 0);
9870 wait_for_qsfp_init(ppd);
9871 set_qsfp_int_n(ppd, 1);
9874 try_start_link(ppd);
9878 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9880 struct hfi1_devdata *dd = ppd->dd;
9883 * Shut down the link and keep it down. First clear the flag that says
9884 * the driver wants to allow the link to be up (driver_link_ready).
9885 * Then make sure the link is not automatically restarted
9886 * (link_enabled). Cancel any pending restart. And finally
9889 ppd->driver_link_ready = 0;
9890 ppd->link_enabled = 0;
9892 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9893 flush_delayed_work(&ppd->start_link_work);
9894 cancel_delayed_work_sync(&ppd->start_link_work);
9896 ppd->offline_disabled_reason =
9897 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
9898 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
9899 OPA_LINKDOWN_REASON_REBOOT);
9900 set_link_state(ppd, HLS_DN_OFFLINE);
9902 /* disable the port */
9903 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9904 cancel_work_sync(&ppd->freeze_work);
9907 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9909 struct hfi1_pportdata *ppd;
9912 ppd = (struct hfi1_pportdata *)(dd + 1);
9913 for (i = 0; i < dd->num_pports; i++, ppd++) {
9914 ppd->ibport_data.rvp.rc_acks = NULL;
9915 ppd->ibport_data.rvp.rc_qacks = NULL;
9916 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9917 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9918 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9919 if (!ppd->ibport_data.rvp.rc_acks ||
9920 !ppd->ibport_data.rvp.rc_delayed_comp ||
9921 !ppd->ibport_data.rvp.rc_qacks)
9929 * index is the index into the receive array
9931 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9932 u32 type, unsigned long pa, u16 order)
9936 if (!(dd->flags & HFI1_PRESENT))
9939 if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
9942 } else if (type > PT_INVALID) {
9944 "unexpected receive array type %u for index %u, not handled\n",
9948 trace_hfi1_put_tid(dd, index, type, pa, order);
9950 #define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9951 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9952 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9953 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9954 << RCV_ARRAY_RT_ADDR_SHIFT;
9955 trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9956 writeq(reg, dd->rcvarray_wc + (index * 8));
9958 if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
9960 * Eager entries are written and flushed
9962 * Expected entries are flushed every 4 writes
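/*
 * Editor's sketch: the RcvArray entry written above packs write-enable, the
 * buffer-size order, and a 4KB-aligned physical address into one 64-bit CSR
 * value. The EX_* shifts and masks are hypothetical stand-ins for the
 * RCV_ARRAY_RT_* constants.
 */
#include <stdint.h>

#define EX_RT_ADDR_SHIFT	12		/* 4KB alignment, as in the driver */
#define EX_WRITE_ENABLE		(1ull << 63)	/* hypothetical bit position */
#define EX_BUF_SIZE_SHIFT	48		/* hypothetical field position */
#define EX_ADDR_MASK		0xffffffffffull	/* hypothetical 40-bit field */

static uint64_t example_pack_rcvarray(uint64_t pa, uint16_t order)
{
	return EX_WRITE_ENABLE
		| ((uint64_t)order << EX_BUF_SIZE_SHIFT)
		| ((pa >> EX_RT_ADDR_SHIFT) & EX_ADDR_MASK);
}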
9969 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9971 struct hfi1_devdata *dd = rcd->dd;
9974 /* this could be optimized */
9975 for (i = rcd->eager_base; i < rcd->eager_base +
9976 rcd->egrbufs.alloced; i++)
9977 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9979 for (i = rcd->expected_base;
9980 i < rcd->expected_base + rcd->expected_count; i++)
9981 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9984 static const char * const ib_cfg_name_strings[] = {
9985 "HFI1_IB_CFG_LIDLMC",
9986 "HFI1_IB_CFG_LWID_DG_ENB",
9987 "HFI1_IB_CFG_LWID_ENB",
9989 "HFI1_IB_CFG_SPD_ENB",
9991 "HFI1_IB_CFG_RXPOL_ENB",
9992 "HFI1_IB_CFG_LREV_ENB",
9993 "HFI1_IB_CFG_LINKLATENCY",
9994 "HFI1_IB_CFG_HRTBT",
9995 "HFI1_IB_CFG_OP_VLS",
9996 "HFI1_IB_CFG_VL_HIGH_CAP",
9997 "HFI1_IB_CFG_VL_LOW_CAP",
9998 "HFI1_IB_CFG_OVERRUN_THRESH",
9999 "HFI1_IB_CFG_PHYERR_THRESH",
10000 "HFI1_IB_CFG_LINKDEFAULT",
10001 "HFI1_IB_CFG_PKEYS",
10003 "HFI1_IB_CFG_LSTATE",
10004 "HFI1_IB_CFG_VL_HIGH_LIMIT",
10005 "HFI1_IB_CFG_PMA_TICKS",
10009 static const char *ib_cfg_name(int which)
10011 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
10013 return ib_cfg_name_strings[which];
10016 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
10018 struct hfi1_devdata *dd = ppd->dd;
10022 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
10023 val = ppd->link_width_enabled;
10025 case HFI1_IB_CFG_LWID: /* currently active Link-width */
10026 val = ppd->link_width_active;
10028 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10029 val = ppd->link_speed_enabled;
10031 case HFI1_IB_CFG_SPD: /* current Link speed */
10032 val = ppd->link_speed_active;
10035 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
10036 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
10037 case HFI1_IB_CFG_LINKLATENCY:
10038 goto unimplemented;
10040 case HFI1_IB_CFG_OP_VLS:
10041 val = ppd->actual_vls_operational;
10043 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
10044 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
10046 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
10047 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
10049 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10050 val = ppd->overrun_threshold;
10052 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10053 val = ppd->phy_error_threshold;
10055 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10059 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
10060 case HFI1_IB_CFG_PMA_TICKS:
10063 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10066 "%s: which %s: not implemented\n",
10068 ib_cfg_name(which));
10076 * The largest MAD packet size.
10078 #define MAX_MAD_PACKET 2048
10081 * Return the maximum header bytes that can go on the _wire_
10082 * for this device. This count includes the ICRC which is
10083 * not part of the packet held in memory but it is appended
10085 * This is dependent on the device's receive header entry size.
10086 * HFI allows this to be set per-receive context, but the
10087 * driver presently enforces a global value.
10089 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
10092 * The maximum non-payload (MTU) bytes in LRH.PktLen are
10093 * the Receive Header Entry Size minus the PBC (or RHF) size
10094 * plus one DW for the ICRC appended by HW.
10096 * dd->rcd[0].rcvhdrqentsize is in DW.
10097 * We use rcd[0] as all contexts will have the same value. Also,
10098 * the first kernel context would have been allocated by now so
10099 * we are guaranteed a valid value.
10101 return (get_hdrqentsize(dd->rcd[0]) - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
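/*
 * Editor's sketch of the computation above in standalone form: hdrqentsize
 * is in dwords; subtract the 2-dword PBC (or RHF), add 1 dword for the
 * hardware-appended ICRC, then convert dwords to bytes with "<< 2".
 */
#include <stdint.h>

static uint32_t example_lrh_max_header_bytes(uint32_t hdrqentsize_dw)
{
	return (hdrqentsize_dw - 2 /* PBC/RHF */ + 1 /* ICRC */) << 2;
}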
10106 * @ppd: per port data
10108 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
10109 * registers compare against LRH.PktLen, so use the max bytes included
10112 * This routine changes all VL values except VL15, which it maintains at
10115 static void set_send_length(struct hfi1_pportdata *ppd)
10117 struct hfi1_devdata *dd = ppd->dd;
10118 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10119 u32 maxvlmtu = dd->vld[15].mtu;
10120 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10121 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10122 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
10126 for (i = 0; i < ppd->vls_supported; i++) {
10127 if (dd->vld[i].mtu > maxvlmtu)
10128 maxvlmtu = dd->vld[i].mtu;
10130 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10131 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10132 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10134 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10135 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10136 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10138 write_csr(dd, SEND_LEN_CHECK0, len1);
10139 write_csr(dd, SEND_LEN_CHECK1, len2);
10140 /* adjust kernel credit return thresholds based on new MTUs */
10141 /* all kernel receive contexts have the same hdrqentsize */
10142 for (i = 0; i < ppd->vls_supported; i++) {
10143 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10144 sc_mtu_to_threshold(dd->vld[i].sc,
10146 get_hdrqentsize(dd->rcd[0])));
10147 for (j = 0; j < INIT_SC_PER_VL; j++)
10148 sc_set_cr_threshold(
10149 pio_select_send_context_vl(dd, j, i),
10152 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10153 sc_mtu_to_threshold(dd->vld[15].sc,
10155 dd->rcd[0]->rcvhdrqentsize));
10156 sc_set_cr_threshold(dd->vld[15].sc, thres);
10158 /* Adjust maximum MTU for the port in DC */
10159 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10160 (ilog2(maxvlmtu >> 8) + 1);
10161 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10162 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10163 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10164 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10165 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
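/*
 * Editor's sketch of the DC MTU-cap encoding above: 10240 gets a dedicated
 * code, otherwise the cap is log2(mtu/256) + 1 (e.g. 2048 -> 4). The value
 * used for the 10240 code here is a hypothetical placeholder for
 * DCC_CFG_PORT_MTU_CAP_10240.
 */
#include <stdint.h>

#define EXAMPLE_MTU_CAP_10240 7	/* placeholder value */

static uint32_t example_ilog2(uint32_t v)
{
	uint32_t r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static uint32_t example_dc_mtu_cap(uint32_t maxvlmtu)
{
	return maxvlmtu == 10240 ? EXAMPLE_MTU_CAP_10240
				 : example_ilog2(maxvlmtu >> 8) + 1;
}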
10168 static void set_lidlmc(struct hfi1_pportdata *ppd)
10172 struct hfi1_devdata *dd = ppd->dd;
10173 u32 mask = ~((1U << ppd->lmc) - 1);
10174 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10178 * Program 0 in CSR if port lid is extended. This prevents
10179 * 9B packets from being sent out for large LIDs.
10181 lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
10182 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10183 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10184 c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
10185 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
10186 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10187 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10188 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10191 * Iterate over all the send contexts and set their SLID check
10193 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10194 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10195 (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10196 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10198 for (i = 0; i < chip_send_contexts(dd); i++) {
10199 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10201 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10204 /* Now we have to do the same thing for the sdma engines */
10205 sdma_update_lmc(dd, mask, lid);
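/*
 * Editor's sketch of the LMC arithmetic above: with an LMC of n, the low n
 * bits of the LID are "don't care", so the check mask clears them and the
 * programmed SLID/DLID value is the base LID.
 */
#include <stdint.h>

static void example_lmc_split(uint32_t lid, uint8_t lmc,
			      uint32_t *mask, uint32_t *base)
{
	*mask = ~((1U << lmc) - 1);	/* e.g. lmc=2 -> ...11111100 */
	*base = lid & *mask;		/* base LID with LMC bits cleared */
}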
10208 static const char *state_completed_string(u32 completed)
10210 static const char * const state_completed[] = {
10216 if (completed < ARRAY_SIZE(state_completed))
10217 return state_completed[completed];
10222 static const char all_lanes_dead_timeout_expired[] =
10223 "All lanes were inactive – was the interconnect media removed?";
10224 static const char tx_out_of_policy[] =
10225 "Passing lanes on local port do not meet the local link width policy";
10226 static const char no_state_complete[] =
10227 "State timeout occurred before link partner completed the state";
10228 static const char * const state_complete_reasons[] = {
10229 [0x00] = "Reason unknown",
10230 [0x01] = "Link was halted by driver, refer to LinkDownReason",
10231 [0x02] = "Link partner reported failure",
10232 [0x10] = "Unable to achieve frame sync on any lane",
10234 "Unable to find a common bit rate with the link partner",
10236 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10238 "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10239 [0x14] = no_state_complete,
10241 "State timeout occurred before link partner identified equalization presets",
10243 "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10244 [0x17] = tx_out_of_policy,
10245 [0x20] = all_lanes_dead_timeout_expired,
10247 "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10248 [0x22] = no_state_complete,
10250 "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10251 [0x24] = tx_out_of_policy,
10252 [0x30] = all_lanes_dead_timeout_expired,
10254 "State timeout occurred waiting for host to process received frames",
10255 [0x32] = no_state_complete,
10257 "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10258 [0x34] = tx_out_of_policy,
10259 [0x35] = "Negotiated link width is mutually exclusive",
10261 "Timed out before receiving verifycap frames in VerifyCap.Exchange",
10262 [0x37] = "Unable to resolve secure data exchange",
10265 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10268 const char *str = NULL;
10270 if (code < ARRAY_SIZE(state_complete_reasons))
10271 str = state_complete_reasons[code];
10278 /* describe the given last state complete frame */
10279 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10280 const char *prefix)
10282 struct hfi1_devdata *dd = ppd->dd;
10290 * [ 0: 0] - success
10292 * [ 7: 4] - next state timeout
10293 * [15: 8] - reason code
10296 success = frame & 0x1;
10297 state = (frame >> 1) & 0x7;
10298 reason = (frame >> 8) & 0xff;
10299 lanes = (frame >> 16) & 0xffff;
10301 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10303 dd_dev_err(dd, " last reported state: %s (0x%x)\n",
10304 state_completed_string(state), state);
10305 dd_dev_err(dd, " state successfully completed: %s\n",
10306 success ? "yes" : "no");
10307 dd_dev_err(dd, " fail reason 0x%x: %s\n",
10308 reason, state_complete_reason_code_string(ppd, reason));
10309 dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
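/*
 * Editor's sketch: unpacking the LNI state-complete frame fields used above
 * into a struct (success in bit 0, state in bits 3..1, reason code in bits
 * 15..8, passing lane mask in bits 31..16).
 */
#include <stdint.h>

struct example_lni_frame {
	unsigned int success;
	unsigned int state;
	unsigned int reason;
	unsigned int lanes;
};

static struct example_lni_frame example_decode_frame(uint32_t frame)
{
	struct example_lni_frame f = {
		.success = frame & 0x1,
		.state   = (frame >> 1) & 0x7,
		.reason  = (frame >> 8) & 0xff,
		.lanes   = (frame >> 16) & 0xffff,
	};
	return f;
}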
10313 * Read the last state complete frames and explain them. This routine
10314 * expects to be called if the link went down during link negotiation
10315 * and initialization (LNI). That is, anywhere between polling and link up.
10317 static void check_lni_states(struct hfi1_pportdata *ppd)
10319 u32 last_local_state;
10320 u32 last_remote_state;
10322 read_last_local_state(ppd->dd, &last_local_state);
10323 read_last_remote_state(ppd->dd, &last_remote_state);
10326 * Don't report anything if there is nothing to report. A value of
10327 * 0 means the link was taken down while polling and there was no
10328 * training in-process.
10330 if (last_local_state == 0 && last_remote_state == 0)
10333 decode_state_complete(ppd, last_local_state, "transmitted");
10334 decode_state_complete(ppd, last_remote_state, "received");
10337 /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10338 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10341 unsigned long timeout;
10343 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10344 timeout = jiffies + msecs_to_jiffies(wait_ms);
10346 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10349 if (time_after(jiffies, timeout)) {
10351 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10359 /* called when the logical link state is not down as it should be */
10360 static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10362 struct hfi1_devdata *dd = ppd->dd;
10365 * Bring link up in LCB loopback
10367 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10368 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10369 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10371 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10372 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10373 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10374 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10376 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10377 (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10379 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10380 write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10382 wait_link_transfer_active(dd, 100);
10385 * Bring the link down again.
10387 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10388 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10389 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10391 dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
10395 * Helper for set_link_state(). Do not call except from that routine.
10396 * Expects ppd->hls_mutex to be held.
10398 * @rem_reason value to be sent to the neighbor
10400 * LinkDownReasons only set if transition succeeds.
10402 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10404 struct hfi1_devdata *dd = ppd->dd;
10405 u32 previous_state;
10406 int offline_state_ret;
10409 update_lcb_cache(dd);
10411 previous_state = ppd->host_link_state;
10412 ppd->host_link_state = HLS_GOING_OFFLINE;
10414 /* start offline transition */
10415 ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
10417 if (ret != HCMD_SUCCESS) {
10419 "Failed to transition to Offline link state, return %d\n",
10423 if (ppd->offline_disabled_reason ==
10424 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10425 ppd->offline_disabled_reason =
10426 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10428 offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10429 if (offline_state_ret < 0)
10430 return offline_state_ret;
10432 /* Disabling AOC transmitters */
10433 if (ppd->port_type == PORT_TYPE_QSFP &&
10434 ppd->qsfp_info.limiting_active &&
10435 qsfp_mod_present(ppd)) {
10438 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10440 set_qsfp_tx(ppd, 0);
10441 release_chip_resource(dd, qsfp_resource(dd));
10443 /* not fatal, but should warn */
10445 "Unable to acquire lock to turn off QSFP TX\n");
10450 * Wait for the offline.Quiet transition if it hasn't happened yet. It
10451 * can take a while for the link to go down.
10453 if (offline_state_ret != PLS_OFFLINE_QUIET) {
10454 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10460 * Now in charge of LCB - must be after the physical state is
10461 * offline.quiet and before host_link_state is changed.
10463 set_host_lcb_access(dd);
10464 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10466 /* make sure the logical state is also down */
10467 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10469 force_logical_link_state_down(ppd);
10471 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10472 update_statusp(ppd, IB_PORT_DOWN);
10475 * The LNI has a mandatory wait time after the physical state
10476 * moves to Offline.Quiet. The wait time may be different
10477 * depending on how the link went down. The 8051 firmware
10478 * will observe the needed wait time and only move to ready
10479 * when that is completed. The largest of the quiet timeouts
10480 * is 6s, so wait that long and then at least 0.5s more for
10481 * other transitions, and another 0.5s for a buffer.
10483 ret = wait_fm_ready(dd, 7000);
10486 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10487 /* state is really offline, so make it so */
10488 ppd->host_link_state = HLS_DN_OFFLINE;
10493 * The state is now offline and the 8051 is ready to accept host
10495 * - change our state
10496 * - notify others if we were previously in a linkup state
10498 ppd->host_link_state = HLS_DN_OFFLINE;
10499 if (previous_state & HLS_UP) {
10500 /* went down while link was up */
10501 handle_linkup_change(dd, 0);
10502 } else if (previous_state
10503 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10504 /* went down while attempting link up */
10505 check_lni_states(ppd);
10507 /* The QSFP doesn't need to be reset on LNI failure */
10508 ppd->qsfp_info.reset_needed = 0;
10511 /* the active link width (downgrade) is 0 on link down */
10512 ppd->link_width_active = 0;
10513 ppd->link_width_downgrade_tx_active = 0;
10514 ppd->link_width_downgrade_rx_active = 0;
10515 ppd->current_egress_rate = 0;
10519 /* return the link state name */
10520 static const char *link_state_name(u32 state)
10523 int n = ilog2(state);
10524 static const char * const names[] = {
10525 [__HLS_UP_INIT_BP] = "INIT",
10526 [__HLS_UP_ARMED_BP] = "ARMED",
10527 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10528 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10529 [__HLS_DN_POLL_BP] = "POLL",
10530 [__HLS_DN_DISABLE_BP] = "DISABLE",
10531 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10532 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10533 [__HLS_GOING_UP_BP] = "GOING_UP",
10534 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10535 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10538 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10539 return name ? name : "unknown";
10542 /* return the link state reason name */
10543 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10545 if (state == HLS_UP_INIT) {
10546 switch (ppd->linkinit_reason) {
10547 case OPA_LINKINIT_REASON_LINKUP:
10549 case OPA_LINKINIT_REASON_FLAPPING:
10550 return "(FLAPPING)";
10551 case OPA_LINKINIT_OUTSIDE_POLICY:
10552 return "(OUTSIDE_POLICY)";
10553 case OPA_LINKINIT_QUARANTINED:
10554 return "(QUARANTINED)";
10555 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10556 return "(INSUFIC_CAPABILITY)";
10565 * driver_pstate - convert the driver's notion of a port's
10566 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10567 * Return -1 (converted to a u32) to indicate error.
10569 u32 driver_pstate(struct hfi1_pportdata *ppd)
10571 switch (ppd->host_link_state) {
10574 case HLS_UP_ACTIVE:
10575 return IB_PORTPHYSSTATE_LINKUP;
10577 return IB_PORTPHYSSTATE_POLLING;
10578 case HLS_DN_DISABLE:
10579 return IB_PORTPHYSSTATE_DISABLED;
10580 case HLS_DN_OFFLINE:
10581 return OPA_PORTPHYSSTATE_OFFLINE;
10582 case HLS_VERIFY_CAP:
10583 return IB_PORTPHYSSTATE_TRAINING;
10585 return IB_PORTPHYSSTATE_TRAINING;
10586 case HLS_GOING_OFFLINE:
10587 return OPA_PORTPHYSSTATE_OFFLINE;
10588 case HLS_LINK_COOLDOWN:
10589 return OPA_PORTPHYSSTATE_OFFLINE;
10590 case HLS_DN_DOWNDEF:
10592 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10593 ppd->host_link_state);
10599 * driver_lstate - convert the driver's notion of a port's
10600 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10601 * (converted to a u32) to indicate error.
10603 u32 driver_lstate(struct hfi1_pportdata *ppd)
10605 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10606 return IB_PORT_DOWN;
10608 switch (ppd->host_link_state & HLS_UP) {
10610 return IB_PORT_INIT;
10612 return IB_PORT_ARMED;
10613 case HLS_UP_ACTIVE:
10614 return IB_PORT_ACTIVE;
10616 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10617 ppd->host_link_state);
10622 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10623 u8 neigh_reason, u8 rem_reason)
10625 if (ppd->local_link_down_reason.latest == 0 &&
10626 ppd->neigh_link_down_reason.latest == 0) {
10627 ppd->local_link_down_reason.latest = lcl_reason;
10628 ppd->neigh_link_down_reason.latest = neigh_reason;
10629 ppd->remote_link_down_reason = rem_reason;
10634 * data_vls_operational() - Verify if data VL BCT credits and MTU
10636 * @ppd: pointer to hfi1_pportdata structure
10638 * Return: true - OK, false - otherwise.
10640 static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10645 if (!ppd->actual_vls_operational)
10648 for (i = 0; i < ppd->vls_supported; i++) {
10649 reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
10650 if ((reg && !ppd->dd->vld[i].mtu) ||
10651 (!reg && ppd->dd->vld[i].mtu))
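/*
 * Editor's sketch of the per-VL consistency rule above: a VL is
 * misconfigured when exactly one of "has BCT credits" and "has an MTU" is
 * true; the two must agree.
 */
#include <stdbool.h>
#include <stdint.h>

static bool example_vl_consistent(uint64_t credits, uint16_t mtu)
{
	return !!credits == !!mtu;	/* both set or both clear */
}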
10659 * Change the physical and/or logical link state.
10661 * Do not call this routine while inside an interrupt. It contains
10662 * calls to routines that can take multiple seconds to finish.
10664 * Returns 0 on success, -errno on failure.
10666 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10668 struct hfi1_devdata *dd = ppd->dd;
10669 struct ib_event event = {.device = NULL};
10671 int orig_new_state, poll_bounce;
10673 mutex_lock(&ppd->hls_lock);
10675 orig_new_state = state;
10676 if (state == HLS_DN_DOWNDEF)
10677 state = HLS_DEFAULT;
10679 /* interpret poll -> poll as a link bounce */
10680 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10681 state == HLS_DN_POLL;
10683 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10684 link_state_name(ppd->host_link_state),
10685 link_state_name(orig_new_state),
10686 poll_bounce ? "(bounce) " : "",
10687 link_state_reason_name(ppd, state));
10690 * If we're going to a (HLS_*) link state that implies the logical
10691 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10692 * reset is_sm_config_started to 0.
10694 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10695 ppd->is_sm_config_started = 0;
10698 * Do nothing if the states match. Let a poll to poll link bounce
10701 if (ppd->host_link_state == state && !poll_bounce)
10706 if (ppd->host_link_state == HLS_DN_POLL &&
10707 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10709 * Quick link up jumps from polling to here.
10711 * Whether in normal or loopback mode, the
10712 * simulator jumps from polling to link up.
10713 * Accept that here.
10716 } else if (ppd->host_link_state != HLS_GOING_UP) {
10721 * Wait for Link_Up physical state.
10722 * Physical and Logical states should already be
10723 * transitioned to LinkUp and LinkInit, respectively.
10725 ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10728 "%s: physical state did not change to LINK-UP\n",
10733 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10736 "%s: logical state did not change to INIT\n",
10741 /* clear old transient LINKINIT_REASON code */
10742 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10743 ppd->linkinit_reason =
10744 OPA_LINKINIT_REASON_LINKUP;
10746 /* enable the port */
10747 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10749 handle_linkup_change(dd, 1);
10750 pio_kernel_linkup(dd);
10753 * After link up, a new link width will have been set.
10754 * Update the xmit counters with regards to the new
10757 update_xmit_counters(ppd, ppd->link_width_active);
10759 ppd->host_link_state = HLS_UP_INIT;
10760 update_statusp(ppd, IB_PORT_INIT);
10763 if (ppd->host_link_state != HLS_UP_INIT)
10766 if (!data_vls_operational(ppd)) {
10768 "%s: Invalid data VL credits or mtu\n",
10774 set_logical_state(dd, LSTATE_ARMED);
10775 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10778 "%s: logical state did not change to ARMED\n",
10782 ppd->host_link_state = HLS_UP_ARMED;
10783 update_statusp(ppd, IB_PORT_ARMED);
10785 * The simulator does not currently implement SMA messages,
10786 * so neighbor_normal is not set. Set it here when we first
10789 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10790 ppd->neighbor_normal = 1;
10792 case HLS_UP_ACTIVE:
10793 if (ppd->host_link_state != HLS_UP_ARMED)
10796 set_logical_state(dd, LSTATE_ACTIVE);
10797 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10800 "%s: logical state did not change to ACTIVE\n",
10803 /* tell all engines to go running */
10804 sdma_all_running(dd);
10805 ppd->host_link_state = HLS_UP_ACTIVE;
10806 update_statusp(ppd, IB_PORT_ACTIVE);
10808 /* Signal the IB layer that the port has gone active */
10809 event.device = &dd->verbs_dev.rdi.ibdev;
10810 event.element.port_num = ppd->port;
10811 event.event = IB_EVENT_PORT_ACTIVE;
10815 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10816 ppd->host_link_state == HLS_DN_OFFLINE) &&
10819 /* Hand LED control to the DC */
10820 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10822 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10823 u8 tmp = ppd->link_enabled;
10825 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10827 ppd->link_enabled = tmp;
10830 ppd->remote_link_down_reason = 0;
10832 if (ppd->driver_link_ready)
10833 ppd->link_enabled = 1;
10836 set_all_slowpath(ppd->dd);
10837 ret = set_local_link_attributes(ppd);
10841 ppd->port_error_action = 0;
10843 if (quick_linkup) {
10844 /* quick linkup does not go into polling */
10845 ret = do_quick_linkup(dd);
10847 ret1 = set_physical_link_state(dd, PLS_POLLING);
10849 ret1 = wait_phys_link_out_of_offline(ppd,
10851 if (ret1 != HCMD_SUCCESS) {
10853 "Failed to transition to Polling link state, return 0x%x\n",
10860 * Change the host link state after requesting DC8051 to
10861 * change its physical state so that we can ignore any
10862 * interrupt with stale LNI(XX) error, which will not be
10863 * cleared until DC8051 transitions to Polling state.
10865 ppd->host_link_state = HLS_DN_POLL;
10866 ppd->offline_disabled_reason =
10867 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10869 * If an error occurred above, go back to offline. The
10870 * caller may reschedule another attempt.
10873 goto_offline(ppd, 0);
10875 log_physical_state(ppd, PLS_POLLING);
10877 case HLS_DN_DISABLE:
10878 /* link is disabled */
10879 ppd->link_enabled = 0;
10881 /* allow any state to transition to disabled */
10883 /* must transition to offline first */
10884 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10885 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10888 ppd->remote_link_down_reason = 0;
10891 if (!dd->dc_shutdown) {
10892 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10893 if (ret1 != HCMD_SUCCESS) {
10895 "Failed to transition to Disabled link state, return 0x%x\n",
10900 ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10903 "%s: physical state did not change to DISABLED\n",
10909 ppd->host_link_state = HLS_DN_DISABLE;
10911 case HLS_DN_OFFLINE:
10912 if (ppd->host_link_state == HLS_DN_DISABLE)
10915 /* allow any state to transition to offline */
10916 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10918 ppd->remote_link_down_reason = 0;
10920 case HLS_VERIFY_CAP:
10921 if (ppd->host_link_state != HLS_DN_POLL)
10923 ppd->host_link_state = HLS_VERIFY_CAP;
10924 log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
10927 if (ppd->host_link_state != HLS_VERIFY_CAP)
10930 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10931 if (ret1 != HCMD_SUCCESS) {
10933 "Failed to transition to link up state, return 0x%x\n",
10938 ppd->host_link_state = HLS_GOING_UP;
10941 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10942 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10944 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10953 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10954 __func__, link_state_name(ppd->host_link_state),
10955 link_state_name(state));
10959 mutex_unlock(&ppd->hls_lock);
10962 ib_dispatch_event(&event);
10967 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10973 case HFI1_IB_CFG_LIDLMC:
10976 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10978 * The VL Arbitrator high limit is sent in units of 4k
10979 * bytes, while HFI stores it in units of 64 bytes.
10982 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10983 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10984 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10986 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10987 /* HFI only supports POLL as the default link down state */
10988 if (val != HLS_DN_POLL)
10991 case HFI1_IB_CFG_OP_VLS:
10992 if (ppd->vls_operational != val) {
10993 ppd->vls_operational = val;
10999 * For link width, link width downgrade, and speed enable, always AND
11000 * the setting with what is actually supported. This has two benefits.
11001 * First, enabled can't have unsupported values, no matter what the
11002 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
11003 * "fill in with your supported value" have all the bits in the
11004 * field set, so simply ANDing with supported has the desired result.
11006 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
11007 ppd->link_width_enabled = val & ppd->link_width_supported;
11009 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
11010 ppd->link_width_downgrade_enabled =
11011 val & ppd->link_width_downgrade_supported;
11013 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
11014 ppd->link_speed_enabled = val & ppd->link_speed_supported;
11016 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
11018 * HFI does not follow IB specs, save this value
11019 * so we can report it, if asked.
11021 ppd->overrun_threshold = val;
11023 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
11025 * HFI does not follow IB specs, save this value
11026 * so we can report it, if asked.
11028 ppd->phy_error_threshold = val;
11031 case HFI1_IB_CFG_MTU:
11032 set_send_length(ppd);
11035 case HFI1_IB_CFG_PKEYS:
11036 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
11037 set_partition_keys(ppd);
11041 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
11042 dd_dev_info(ppd->dd,
11043 "%s: which %s, val 0x%x: not implemented\n",
11044 __func__, ib_cfg_name(which), val);
11050 /* begin functions related to vl arbitration table caching */
11051 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
11055 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11056 VL_ARB_LOW_PRIO_TABLE_SIZE);
11057 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11058 VL_ARB_HIGH_PRIO_TABLE_SIZE);
11061 * Note that we always return values directly from the
11062 * 'vl_arb_cache' (and do no CSR reads) in response to a
11063 * 'Get(VLArbTable)'. This is obviously correct after a
11064 * 'Set(VLArbTable)', since the cache will then be up to
11065 * date. But it's also correct prior to any 'Set(VLArbTable)'
11066 * since then both the cache, and the relevant h/w registers
11070 for (i = 0; i < MAX_PRIO_TABLE; i++)
11071 spin_lock_init(&ppd->vl_arb_cache[i].lock);
11075 * vl_arb_lock_cache
11077 * All other vl_arb_* functions should be called only after locking
11080 static inline struct vl_arb_cache *
11081 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
11083 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
11085 spin_lock(&ppd->vl_arb_cache[idx].lock);
11086 return &ppd->vl_arb_cache[idx];
11089 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
11091 spin_unlock(&ppd->vl_arb_cache[idx].lock);
11094 static void vl_arb_get_cache(struct vl_arb_cache *cache,
11095 struct ib_vl_weight_elem *vl)
11097 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
11100 static void vl_arb_set_cache(struct vl_arb_cache *cache,
11101 struct ib_vl_weight_elem *vl)
11103 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11106 static int vl_arb_match_cache(struct vl_arb_cache *cache,
11107 struct ib_vl_weight_elem *vl)
11109 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11112 /* end functions related to vl arbitration table caching */
11114 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
11115 u32 size, struct ib_vl_weight_elem *vl)
11117 struct hfi1_devdata *dd = ppd->dd;
11119 unsigned int i, is_up = 0;
11120 int drain, ret = 0;
11122 mutex_lock(&ppd->hls_lock);
11124 if (ppd->host_link_state & HLS_UP)
11127 drain = !is_ax(dd) && is_up;
11131 * Before adjusting VL arbitration weights, empty per-VL
11132 * FIFOs, otherwise a packet whose VL weight is being
11133 * set to 0 could get stuck in a FIFO with no chance to
11136 ret = stop_drain_data_vls(dd);
11141 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
11146 for (i = 0; i < size; i++, vl++) {
11148 * NOTE: The low priority shift and mask are used here, but
11149 * they are the same for both the low and high registers.
11151 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
11152 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
11153 | (((u64)vl->weight
11154 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
11155 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
11156 write_csr(dd, target + (i * 8), reg);
11158 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
11161 open_fill_data_vls(dd); /* reopen all VLs */
11164 mutex_unlock(&ppd->hls_lock);
11170 * Read one credit merge VL register.
11172 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
11173 struct vl_limit *vll)
11175 u64 reg = read_csr(dd, csr);
11177 vll->dedicated = cpu_to_be16(
11178 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11179 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11180 vll->shared = cpu_to_be16(
11181 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11182 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11186 * Read the current credit merge limits.
11188 static int get_buffer_control(struct hfi1_devdata *dd,
11189 struct buffer_control *bc, u16 *overall_limit)
11194 /* not all entries are filled in */
11195 memset(bc, 0, sizeof(*bc));
11197 /* OPA and HFI have a 1-1 mapping */
11198 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11199 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11201 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11202 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11204 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11205 bc->overall_shared_limit = cpu_to_be16(
11206 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11207 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11209 *overall_limit = (reg
11210 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11211 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11212 return sizeof(struct buffer_control);
11215 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11220 /* each register contains 16 SC->VLnt mappings, 4 bits each */
11221 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11222 for (i = 0; i < sizeof(u64); i++) {
11223 u8 byte = *(((u8 *)&reg) + i);
11225 dp->vlnt[2 * i] = byte & 0xf;
11226 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11229 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11230 for (i = 0; i < sizeof(u64); i++) {
11231 u8 byte = *(((u8 *)&reg) + i);
11233 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11234 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11236 return sizeof(struct sc2vlnt);
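/*
 * Editor's sketch: each 64-bit SC->VLnt CSR holds 16 four-bit entries, and
 * the loops above peel them out a byte at a time, low nibble first. The
 * driver indexes the register's bytes in memory (equivalent on the
 * little-endian platforms this chip lives on); the shift form below is the
 * endian-independent way to express it.
 */
#include <stdint.h>

static void example_unpack_nibbles(uint64_t reg, uint8_t out[16])
{
	int i;

	for (i = 0; i < 8; i++) {
		uint8_t byte = (uint8_t)(reg >> (8 * i));

		out[2 * i]     = byte & 0xf;
		out[2 * i + 1] = byte >> 4;
	}
}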
11239 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11240 struct ib_vl_weight_elem *vl)
11244 for (i = 0; i < nelems; i++, vl++) {
11250 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11252 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11254 0, dp->vlnt[0] & 0xf,
11255 1, dp->vlnt[1] & 0xf,
11256 2, dp->vlnt[2] & 0xf,
11257 3, dp->vlnt[3] & 0xf,
11258 4, dp->vlnt[4] & 0xf,
11259 5, dp->vlnt[5] & 0xf,
11260 6, dp->vlnt[6] & 0xf,
11261 7, dp->vlnt[7] & 0xf,
11262 8, dp->vlnt[8] & 0xf,
11263 9, dp->vlnt[9] & 0xf,
11264 10, dp->vlnt[10] & 0xf,
11265 11, dp->vlnt[11] & 0xf,
11266 12, dp->vlnt[12] & 0xf,
11267 13, dp->vlnt[13] & 0xf,
11268 14, dp->vlnt[14] & 0xf,
11269 15, dp->vlnt[15] & 0xf));
11270 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11271 DC_SC_VL_VAL(31_16,
11272 16, dp->vlnt[16] & 0xf,
11273 17, dp->vlnt[17] & 0xf,
11274 18, dp->vlnt[18] & 0xf,
11275 19, dp->vlnt[19] & 0xf,
11276 20, dp->vlnt[20] & 0xf,
11277 21, dp->vlnt[21] & 0xf,
11278 22, dp->vlnt[22] & 0xf,
11279 23, dp->vlnt[23] & 0xf,
11280 24, dp->vlnt[24] & 0xf,
11281 25, dp->vlnt[25] & 0xf,
11282 26, dp->vlnt[26] & 0xf,
11283 27, dp->vlnt[27] & 0xf,
11284 28, dp->vlnt[28] & 0xf,
11285 29, dp->vlnt[29] & 0xf,
11286 30, dp->vlnt[30] & 0xf,
11287 31, dp->vlnt[31] & 0xf));
11290 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11294 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11295 what, (int)limit, idx);
11298 /* change only the shared limit portion of SendCmGlobalCredit */
11299 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11303 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11304 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11305 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11306 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11309 /* change only the total credit limit portion of SendCmGlobalCredit */
11310 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11314 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11315 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11316 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11317 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
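/*
 * Editor's sketch: both helpers above are the same read-modify-write pattern
 * on a single field of a CSR; generic form with hypothetical mask/shift
 * parameters.
 */
#include <stdint.h>

static uint64_t example_set_field(uint64_t csr, uint64_t smask,
				  unsigned int shift, uint16_t value)
{
	csr &= ~smask;			/* clear the field */
	csr |= (uint64_t)value << shift;	/* insert the new value */
	return csr;
}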
11320 /* set the given per-VL shared limit */
11321 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11326 if (vl < TXE_NUM_DATA_VL)
11327 addr = SEND_CM_CREDIT_VL + (8 * vl);
11329 addr = SEND_CM_CREDIT_VL15;
11331 reg = read_csr(dd, addr);
11332 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11333 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11334 write_csr(dd, addr, reg);
11337 /* set the given per-VL dedicated limit */
11338 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11343 if (vl < TXE_NUM_DATA_VL)
11344 addr = SEND_CM_CREDIT_VL + (8 * vl);
11346 addr = SEND_CM_CREDIT_VL15;
11348 reg = read_csr(dd, addr);
11349 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11350 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11351 write_csr(dd, addr, reg);
11354 /* spin until the given per-VL status mask bits clear */
11355 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11358 unsigned long timeout;
11361 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11363 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11366 return; /* success */
11367 if (time_after(jiffies, timeout))
11368 break; /* timed out */
11373 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11374 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11376 * If this occurs, it is likely there was a credit loss on the link.
11377 * The only recovery from that is a link bounce.
11380 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
11384 * The number of credits on the VLs may be changed while everything
11385 * is "live", but the following algorithm must be followed due to
11386 * how the hardware is actually implemented. In particular,
11387 * Return_Credit_Status[] is the only correct status check.
11389 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11390 * set Global_Shared_Credit_Limit = 0
11392 * mask0 = all VLs that are changing either dedicated or shared limits
11393 * set Shared_Limit[mask0] = 0
11394 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11395 * if (changing any dedicated limit)
11396 * mask1 = all VLs that are lowering dedicated limits
11397 * lower Dedicated_Limit[mask1]
11398 * spin until Return_Credit_Status[mask1] == 0
11399 * raise Dedicated_Limits
11400 * raise Shared_Limits
11401 * raise Global_Shared_Credit_Limit
11403 * lower = if the new limit is lower, set the limit to the new value
11404 * raise = if the new limit is higher than the current value (may be changed
11405 * earlier in the algorithm), set the limit to the new value
11407 int set_buffer_control(struct hfi1_pportdata *ppd,
11408 struct buffer_control *new_bc)
11410 struct hfi1_devdata *dd = ppd->dd;
11411 u64 changing_mask, ld_mask, stat_mask;
11413 int i, use_all_mask;
11414 int this_shared_changing;
11415 int vl_count = 0, ret;
11417 * A0: add the variable any_shared_limit_changing below and in the
11418 * algorithm above. If removing A0 support, it can be removed.
11420 int any_shared_limit_changing;
11421 struct buffer_control cur_bc;
11422 u8 changing[OPA_MAX_VLS];
11423 u8 lowering_dedicated[OPA_MAX_VLS];
11426 const u64 all_mask =
11427 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11428 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11429 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11430 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11431 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11432 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11433 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11434 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11435 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11437 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11438 #define NUM_USABLE_VLS 16 /* look at VL15 and less */
11440 /* find the new total credits, do sanity check on unused VLs */
11441 for (i = 0; i < OPA_MAX_VLS; i++) {
11443 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11446 nonzero_msg(dd, i, "dedicated",
11447 be16_to_cpu(new_bc->vl[i].dedicated));
11448 nonzero_msg(dd, i, "shared",
11449 be16_to_cpu(new_bc->vl[i].shared));
11450 new_bc->vl[i].dedicated = 0;
11451 new_bc->vl[i].shared = 0;
11453 new_total += be16_to_cpu(new_bc->overall_shared_limit);
11455 /* fetch the current values */
11456 get_buffer_control(dd, &cur_bc, &cur_total);
11459 * Create the masks we will use.
11461 memset(changing, 0, sizeof(changing));
11462 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11464 * NOTE: Assumes that the individual VL bits are adjacent and in
11468 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11472 any_shared_limit_changing = 0;
11473 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11476 this_shared_changing = new_bc->vl[i].shared
11477 != cur_bc.vl[i].shared;
11478 if (this_shared_changing)
11479 any_shared_limit_changing = 1;
11480 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11481 this_shared_changing) {
11483 changing_mask |= stat_mask;
11486 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11487 be16_to_cpu(cur_bc.vl[i].dedicated)) {
11488 lowering_dedicated[i] = 1;
11489 ld_mask |= stat_mask;
11493 /* bracket the credit change with a total adjustment */
11494 if (new_total > cur_total)
11495 set_global_limit(dd, new_total);
11498 * Start the credit change algorithm.
11501 if ((be16_to_cpu(new_bc->overall_shared_limit) <
11502 be16_to_cpu(cur_bc.overall_shared_limit)) ||
11503 (is_ax(dd) && any_shared_limit_changing)) {
11504 set_global_shared(dd, 0);
11505 cur_bc.overall_shared_limit = 0;
11509 for (i = 0; i < NUM_USABLE_VLS; i++) {
11514 set_vl_shared(dd, i, 0);
11515 cur_bc.vl[i].shared = 0;
11519 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11522 if (change_count > 0) {
11523 for (i = 0; i < NUM_USABLE_VLS; i++) {
11527 if (lowering_dedicated[i]) {
11528 set_vl_dedicated(dd, i,
11529 be16_to_cpu(new_bc->
11531 cur_bc.vl[i].dedicated =
11532 new_bc->vl[i].dedicated;
11536 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11538 /* now raise all dedicated that are going up */
11539 for (i = 0; i < NUM_USABLE_VLS; i++) {
11543 if (be16_to_cpu(new_bc->vl[i].dedicated) >
11544 be16_to_cpu(cur_bc.vl[i].dedicated))
11545 set_vl_dedicated(dd, i,
11546 be16_to_cpu(new_bc->
11551 /* next raise all shared that are going up */
11552 for (i = 0; i < NUM_USABLE_VLS; i++) {
11556 if (be16_to_cpu(new_bc->vl[i].shared) >
11557 be16_to_cpu(cur_bc.vl[i].shared))
11558 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11561 /* finally raise the global shared */
11562 if (be16_to_cpu(new_bc->overall_shared_limit) >
11563 be16_to_cpu(cur_bc.overall_shared_limit))
11564 set_global_shared(dd,
11565 be16_to_cpu(new_bc->overall_shared_limit));
11567 /* bracket the credit change with a total adjustment */
11568 if (new_total < cur_total)
11569 set_global_limit(dd, new_total);
11572 * Determine the actual number of operational VLS using the number of
11573 * dedicated and shared credits for each VL.
11575 if (change_count > 0) {
11576 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11577 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11578 be16_to_cpu(new_bc->vl[i].shared) > 0)
11580 ppd->actual_vls_operational = vl_count;
11581 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11582 ppd->actual_vls_operational :
11583 ppd->vls_operational,
11586 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11587 ppd->actual_vls_operational :
11588 ppd->vls_operational, NULL);
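/*
 * Editor's sketch of the "bracketing" in the function above: the global
 * total is raised before the per-VL changes when it is growing, and lowered
 * after them when it is shrinking, so the in-flight credits never exceed
 * both the old and new totals.
 */
static void example_change_credits(unsigned int *global_total,
				   unsigned int new_total,
				   void (*do_per_vl_changes)(void))
{
	/* bracket: raise the total first if growing... */
	if (new_total > *global_total)
		*global_total = new_total;

	do_per_vl_changes();

	/* ...or lower it last if shrinking */
	if (new_total < *global_total)
		*global_total = new_total;
}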
11596 * Read the given fabric manager table. Return the size of the
11597 * table (in bytes) on success, and a negative error code on
11600 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11604 struct vl_arb_cache *vlc;
11607 case FM_TBL_VL_HIGH_ARB:
11610 * OPA specifies 128 elements (of 2 bytes each), though
11611 * HFI supports only 16 elements in h/w.
11613 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11614 vl_arb_get_cache(vlc, t);
11615 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11617 case FM_TBL_VL_LOW_ARB:
11620 * OPA specifies 128 elements (of 2 bytes each), though
11621 * HFI supports only 16 elements in h/w.
11623 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11624 vl_arb_get_cache(vlc, t);
11625 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11627 case FM_TBL_BUFFER_CONTROL:
11628 size = get_buffer_control(ppd->dd, t, NULL);
11630 case FM_TBL_SC2VLNT:
11631 size = get_sc2vlnt(ppd->dd, t);
11633 case FM_TBL_VL_PREEMPT_ELEMS:
11635 /* OPA specifies 128 elements, of 2 bytes each */
11636 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11638 case FM_TBL_VL_PREEMPT_MATRIX:
11641 * OPA specifies that this is the same size as the VL
11642 * arbitration tables (i.e., 256 bytes).
11652 * Write the given fabric manager table.
11654 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11657 struct vl_arb_cache *vlc;
11660 case FM_TBL_VL_HIGH_ARB:
11661 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11662 if (vl_arb_match_cache(vlc, t)) {
11663 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11666 vl_arb_set_cache(vlc, t);
11667 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11668 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11669 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11671 case FM_TBL_VL_LOW_ARB:
11672 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11673 if (vl_arb_match_cache(vlc, t)) {
11674 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11677 vl_arb_set_cache(vlc, t);
11678 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11679 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11680 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11682 case FM_TBL_BUFFER_CONTROL:
11683 ret = set_buffer_control(ppd, t);
11685 case FM_TBL_SC2VLNT:
11686 set_sc2vlnt(ppd->dd, t);
11695 * Disable all data VLs.
11697 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11699 static int disable_data_vls(struct hfi1_devdata *dd)
11704 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11710 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11711 * Just re-enables all data VLs (the "fill" part happens
11712 * automatically - the name was chosen for symmetry with
11713 * stop_drain_data_vls()).
11715 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11717 int open_fill_data_vls(struct hfi1_devdata *dd)
11722 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11728 * drain_data_vls() - assumes that disable_data_vls() has been called,
11729 * waits for the occupancy (of per-VL FIFOs) of all contexts and SDMA
11730 * engines to drop to 0.
11732 static void drain_data_vls(struct hfi1_devdata *dd)
11736 pause_for_credit_return(dd);
11740 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11742 * Use open_fill_data_vls() to resume using data VLs. This pair is
11743 * meant to be used like this:
11745 * stop_drain_data_vls(dd);
11746 * // do things with per-VL resources
11747 * open_fill_data_vls(dd);
11749 int stop_drain_data_vls(struct hfi1_devdata *dd)
11753 ret = disable_data_vls(dd);
11755 drain_data_vls(dd);
11761 * Convert a nanosecond time to a cclock count. No matter how slow
11762 * the cclock, a non-zero ns will always have a non-zero result.
11764 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11768 if (dd->icode == ICODE_FPGA_EMULATION)
11769 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11770 else /* simulation pretends to be ASIC */
11771 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11772 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11778 * Convert a cclock count to nanoseconds. No matter how slow
11779 * the cclock, a non-zero cclocks will always have a non-zero result.
11781 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11785 if (dd->icode == ICODE_FPGA_EMULATION)
11786 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11787 else /* simulation pretends to be ASIC */
11788 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11789 if (cclocks && !ns)
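/*
 * Illustrative sketch (not part of the driver): both conversions above
 * follow the same scale-then-clamp pattern, shown generically here so
 * the "never round a non-zero value to zero" guarantee is explicit.
 * The helper name and parameters are hypothetical.
 */
static inline u32 scale_clamp_nonzero(u32 val, u32 mult, u32 div)
{
	u32 out = (val * mult) / div;

	if (val && !out)
		out = 1;	/* preserve non-zero-ness after scaling */
	return out;
}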
11795 * Dynamically adjust the receive interrupt timeout for a context based on
11796 * incoming packet rate.
11798 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11800 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11802 struct hfi1_devdata *dd = rcd->dd;
11803 u32 timeout = rcd->rcvavail_timeout;
11806 * This algorithm doubles or halves the timeout depending on whether
11807 * the number of packets received in this interrupt was less than, or
11808 * greater than or equal to, the interrupt count.
11810 * The calculations below do not allow a steady state to be achieved.
11811 * Only at the endpoints is it possible to have an unchanging timeout.
11814 if (npkts < rcv_intr_count) {
11816 * Not enough packets arrived before the timeout, adjust
11817 * timeout downward.
11819 if (timeout < 2) /* already at minimum? */
11824 * More than enough packets arrived before the timeout, adjust
11827 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11829 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11832 rcd->rcvavail_timeout = timeout;
11834 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11835 * been verified to be in range
11837 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11839 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
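/*
 * Illustrative sketch (not driver code) of the policy implemented in
 * adjust_rcv_timeout() above: repeated "slow" interrupts walk the
 * timeout down toward 1, repeated "fast" interrupts walk it up to the
 * CSR maximum, and there is no stable point in between. The helper
 * name and parameters are hypothetical.
 */
static u32 next_rcv_timeout(u32 timeout, u32 npkts, u32 intr_count, u32 max)
{
	if (npkts < intr_count)
		return timeout < 2 ? timeout : timeout >> 1; /* halve */
	if (timeout >= max)
		return timeout;		/* already at the maximum */
	return min(timeout << 1, max);	/* double, clamped to max */
}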
11842 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11843 u32 intr_adjust, u32 npkts)
11845 struct hfi1_devdata *dd = rcd->dd;
11847 u32 ctxt = rcd->ctxt;
11850 * Need to write timeout register before updating RcvHdrHead to ensure
11851 * that a new value is used when the HW decides to restart counting.
11854 adjust_rcv_timeout(rcd, npkts);
11856 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11857 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11858 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11860 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11861 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11862 << RCV_HDR_HEAD_HEAD_SHIFT);
11863 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11866 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11870 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11871 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11873 if (hfi1_rcvhdrtail_kvaddr(rcd))
11874 tail = get_rcvhdrtail(rcd);
11876 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11878 return head == tail;
11882 * Context Control and Receive Array encoding for buffer size:
11883 *	0x0 invalid
11884 *	0x1   4 KB
11885 *	0x2   8 KB
11886 *	0x3  16 KB
11887 *	0x4  32 KB
11888 *	0x5  64 KB
11889 *	0x6 128 KB
11890 *	0x7 256 KB
11891 *	0x8 512 KB (Receive Array only)
11892 *	0x9   1 MB (Receive Array only)
11893 *	0xa   2 MB (Receive Array only)
11895 *	0xB-0xF - reserved (Receive Array only)
11898 * This routine assumes that the value has already been sanity checked.
11900 static u32 encoded_size(u32 size)
11903 case 4 * 1024: return 0x1;
11904 case 8 * 1024: return 0x2;
11905 case 16 * 1024: return 0x3;
11906 case 32 * 1024: return 0x4;
11907 case 64 * 1024: return 0x5;
11908 case 128 * 1024: return 0x6;
11909 case 256 * 1024: return 0x7;
11910 case 512 * 1024: return 0x8;
11911 case 1 * 1024 * 1024: return 0x9;
11912 case 2 * 1024 * 1024: return 0xa;
11914 return 0x1; /* if invalid, go with the minimum size */
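/*
 * Equivalent view of the table above (illustrative only): each valid
 * encoding is ilog2(size) - 11, since 4 KB (2^12) maps to 0x1 and each
 * doubling adds one, up to 2 MB (2^21) -> 0xa. The helper below is a
 * hypothetical alternative, not the driver's implementation.
 */
static u32 encoded_size_ilog2(u32 size)
{
	if (is_power_of_2(size) && size >= 4 * 1024 &&
	    size <= 2 * 1024 * 1024)
		return ilog2(size) - 11;
	return 0x1;	/* same fallback as encoded_size() */
}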
11918 * encode_rcv_header_entry_size - return chip specific encoding for size
11919 * @size: size in dwords
11921 * Convert a receive header entry size to the encoding used in the CSR.
11923 * Return zero if the given size is invalid, otherwise the encoding.
11925 u8 encode_rcv_header_entry_size(u8 size)
11927 /* there are only 3 valid receive header entry sizes */
11928 if (size == 2)
11929 	return 1;
11930 if (size == 16)
11931 	return 2;
11932 if (size == 32)
11933 	return 4;
11934 return 0; /* invalid */
11938 * hfi1_validate_rcvhdrcnt - validate hdrcnt
11939 * @dd: the device data
11940 * @thecnt: the header count
11942 int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
11944 if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
11945 dd_dev_err(dd, "Receive header queue count too small\n");
11949 if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
11951 "Receive header queue count cannot be greater than %u\n",
11952 HFI1_MAX_HDRQ_EGRBUF_CNT);
11956 if (thecnt % HDRQ_INCREMENT) {
11957 dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
11958 thecnt, HDRQ_INCREMENT);
11966 * set_hdrq_regs - set header queue registers for context
11967 * @dd: the device data
11968 * @ctxt: the context
11969 * @entsize: the dword entry size
11970 * @hdrcnt: the number of header entries
11972 void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt)
11976 reg = (((u64)hdrcnt >> HDRQ_SIZE_SHIFT) & RCV_HDR_CNT_CNT_MASK) <<
11977 RCV_HDR_CNT_CNT_SHIFT;
11978 write_kctxt_csr(dd, ctxt, RCV_HDR_CNT, reg);
11979 reg = ((u64)encode_rcv_header_entry_size(entsize) &
11980 RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) <<
11981 RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
11982 write_kctxt_csr(dd, ctxt, RCV_HDR_ENT_SIZE, reg);
11983 reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK) <<
11984 RCV_HDR_SIZE_HDR_SIZE_SHIFT;
11985 write_kctxt_csr(dd, ctxt, RCV_HDR_SIZE, reg);
11988 * Program dummy tail address for every receive context
11989 * before enabling any receive context
11991 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11992 dd->rcvhdrtail_dummy_dma);
11995 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11996 struct hfi1_ctxtdata *rcd)
11999 int did_enable = 0;
12007 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
12009 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
12010 /* if the context already enabled, don't do the extra steps */
12011 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
12012 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
12013 /* reset the tail and hdr addresses, and sequence count */
12014 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
12016 if (hfi1_rcvhdrtail_kvaddr(rcd))
12017 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12018 rcd->rcvhdrqtailaddr_dma);
12019 hfi1_set_seq_cnt(rcd, 1);
12021 /* reset the cached receive header queue head value */
12022 hfi1_set_rcd_head(rcd, 0);
12025 * Zero the receive header queue so we don't get false
12026 * positives when checking the sequence number. The
12027 * sequence numbers could land exactly on the same spot.
12028 * E.g. a rcd restart before the receive header wrapped.
12030 memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));
12032 /* starting timeout */
12033 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
12035 /* enable the context */
12036 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
12038 /* clean the egr buffer size first */
12039 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
12040 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
12041 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
12042 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
12044 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
12045 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
12048 /* zero RcvEgrIndexHead */
12049 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
12051 /* set eager count and base index */
12052 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
12053 & RCV_EGR_CTRL_EGR_CNT_MASK)
12054 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
12055 (((rcd->eager_base >> RCV_SHIFT)
12056 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
12057 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
12058 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
12061 * Set TID (expected) count and base index.
12062 * rcd->expected_count is set to individual RcvArray entries,
12063 * not pairs, and the CSR takes a pair-count in groups of
12064 * four, so divide by 8.
12066 reg = (((rcd->expected_count >> RCV_SHIFT)
12067 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
12068 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
12069 (((rcd->expected_base >> RCV_SHIFT)
12070 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
12071 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
12072 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
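/*
 * Worked example (illustrative numbers, assuming RCV_SHIFT is 3 per the
 * "divide by 8" comment above): expected_count = 2048 individual
 * RcvArray entries -> 2048 >> 3 = 256 written to the pair-count field
 * (2 entries per pair, pairs taken in groups of four).
 */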
12073 if (ctxt == HFI1_CTRL_CTXT)
12074 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
12076 if (op & HFI1_RCVCTRL_CTXT_DIS) {
12077 write_csr(dd, RCV_VL15, 0);
12079 * When receive context is being disabled turn on tail
12080 * update with a dummy tail address and then disable
12083 if (dd->rcvhdrtail_dummy_dma) {
12084 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12085 dd->rcvhdrtail_dummy_dma);
12086 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
12087 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
12090 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
12092 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) {
12093 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
12094 IS_RCVAVAIL_START + rcd->ctxt, true);
12095 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
12097 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) {
12098 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
12099 IS_RCVAVAIL_START + rcd->ctxt, false);
12100 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
12102 if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && hfi1_rcvhdrtail_kvaddr(rcd))
12103 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
12104 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
12105 /* See comment on RcvCtxtCtrl.TailUpd above */
12106 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
12107 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
12109 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
12110 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
12111 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
12112 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
12113 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
12115 * In one-packet-per-eager mode, the size comes from
12116 * the RcvArray entry.
12118 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
12119 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
12121 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
12122 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
12123 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
12124 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
12125 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
12126 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
12127 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
12128 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12129 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
12130 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12131 if (op & HFI1_RCVCTRL_URGENT_ENB)
12132 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12133 IS_RCVURGENT_START + rcd->ctxt, true);
12134 if (op & HFI1_RCVCTRL_URGENT_DIS)
12135 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12136 IS_RCVURGENT_START + rcd->ctxt, false);
12138 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
12139 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
12141 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
12143 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
12144 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12146 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
12148 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12149 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
12150 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
12151 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12152 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12153 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
12154 ctxt, reg, reg == 0 ? "not" : "still");
12160 * The interrupt timeout and count must be set after
12161 * the context is enabled to take effect.
12163 /* set interrupt timeout */
12164 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
12165 (u64)rcd->rcvavail_timeout <<
12166 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
12168 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
12169 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
12170 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
12173 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
12175 * If the context has been disabled and the Tail Update has
12176 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
12177 * so it doesn't contain an address that is invalid.
12179 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12180 dd->rcvhdrtail_dummy_dma);
12183 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
12189 ret = dd->cntrnameslen;
12190 *namep = dd->cntrnames;
12192 const struct cntr_entry *entry;
12195 ret = (dd->ndevcntrs) * sizeof(u64);
12197 /* Get the start of the block of counters */
12198 *cntrp = dd->cntrs;
12201 * Now go and fill in each counter in the block.
12203 for (i = 0; i < DEV_CNTR_LAST; i++) {
12204 entry = &dev_cntrs[i];
12205 hfi1_cdbg(CNTR, "reading %s", entry->name);
12206 if (entry->flags & CNTR_DISABLED) {
12208 hfi1_cdbg(CNTR, "\tDisabled\n");
12210 if (entry->flags & CNTR_VL) {
12211 hfi1_cdbg(CNTR, "\tPer VL\n");
12212 for (j = 0; j < C_VL_COUNT; j++) {
12213 val = entry->rw_cntr(entry,
12219 "\t\tRead 0x%llx for %d\n",
12221 dd->cntrs[entry->offset + j] =
12224 } else if (entry->flags & CNTR_SDMA) {
12226 "\t Per SDMA Engine\n");
12227 for (j = 0; j < chip_sdma_engines(dd);
12230 entry->rw_cntr(entry, dd, j,
12233 "\t\tRead 0x%llx for %d\n",
12235 dd->cntrs[entry->offset + j] =
12239 val = entry->rw_cntr(entry, dd,
12242 dd->cntrs[entry->offset] = val;
12243 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12252 * Used by sysfs to create files for hfi stats to read
12254 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
12260 ret = ppd->dd->portcntrnameslen;
12261 *namep = ppd->dd->portcntrnames;
12263 const struct cntr_entry *entry;
12266 ret = ppd->dd->nportcntrs * sizeof(u64);
12267 *cntrp = ppd->cntrs;
12269 for (i = 0; i < PORT_CNTR_LAST; i++) {
12270 entry = &port_cntrs[i];
12271 hfi1_cdbg(CNTR, "reading %s", entry->name);
12272 if (entry->flags & CNTR_DISABLED) {
12274 hfi1_cdbg(CNTR, "\tDisabled\n");
12278 if (entry->flags & CNTR_VL) {
12279 hfi1_cdbg(CNTR, "\tPer VL");
12280 for (j = 0; j < C_VL_COUNT; j++) {
12281 val = entry->rw_cntr(entry, ppd, j,
12286 "\t\tRead 0x%llx for %d",
12288 ppd->cntrs[entry->offset + j] = val;
12291 val = entry->rw_cntr(entry, ppd,
12295 ppd->cntrs[entry->offset] = val;
12296 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12303 static void free_cntrs(struct hfi1_devdata *dd)
12305 struct hfi1_pportdata *ppd;
12308 if (dd->synth_stats_timer.function)
12309 del_timer_sync(&dd->synth_stats_timer);
12310 ppd = (struct hfi1_pportdata *)(dd + 1);
12311 for (i = 0; i < dd->num_pports; i++, ppd++) {
12313 kfree(ppd->scntrs);
12314 free_percpu(ppd->ibport_data.rvp.rc_acks);
12315 free_percpu(ppd->ibport_data.rvp.rc_qacks);
12316 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12318 ppd->scntrs = NULL;
12319 ppd->ibport_data.rvp.rc_acks = NULL;
12320 ppd->ibport_data.rvp.rc_qacks = NULL;
12321 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12323 kfree(dd->portcntrnames);
12324 dd->portcntrnames = NULL;
12329 kfree(dd->cntrnames);
12330 dd->cntrnames = NULL;
12331 if (dd->update_cntr_wq) {
12332 destroy_workqueue(dd->update_cntr_wq);
12333 dd->update_cntr_wq = NULL;
12337 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12338 u64 *psval, void *context, int vl)
12343 if (entry->flags & CNTR_DISABLED) {
12344 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12348 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12350 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12352 /* If it's a synthetic counter, there is more work to do */
12353 if (entry->flags & CNTR_SYNTH) {
12354 if (sval == CNTR_MAX) {
12355 /* No need to read already saturated */
12359 if (entry->flags & CNTR_32BIT) {
12360 /* 32bit counters can wrap multiple times */
12361 u64 upper = sval >> 32;
12362 u64 lower = (sval << 32) >> 32;
12364 if (lower > val) { /* hw wrapped */
12365 if (upper == CNTR_32BIT_MAX)
12371 if (val != CNTR_MAX)
12372 val = (upper << 32) | val;
12375 /* If we rolled we are saturated */
12376 if ((val < sval) || (val > CNTR_MAX))
12383 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
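/*
 * Illustrative sketch (not driver code) of the 32-bit extension logic
 * above: fold a fresh 32-bit hardware reading into a 64-bit running
 * value by detecting wrap against the previously saved value. The
 * saturation handling at CNTR_MAX is omitted for brevity; the helper
 * name is hypothetical.
 */
static u64 extend_32bit_cntr(u64 saved, u32 hw_val)
{
	u64 upper = saved >> 32;
	u64 lower = (u32)saved;

	if (lower > hw_val)	/* hardware counter wrapped */
		upper++;
	return (upper << 32) | hw_val;
}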
12388 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12389 struct cntr_entry *entry,
12390 u64 *psval, void *context, int vl, u64 data)
12394 if (entry->flags & CNTR_DISABLED) {
12395 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12399 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12401 if (entry->flags & CNTR_SYNTH) {
12403 if (entry->flags & CNTR_32BIT) {
12404 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12405 (data << 32) >> 32);
12406 val = data; /* return the full 64bit value */
12408 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12412 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12417 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12422 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12424 struct cntr_entry *entry;
12427 entry = &dev_cntrs[index];
12428 sval = dd->scntrs + entry->offset;
12430 if (vl != CNTR_INVALID_VL)
12433 return read_dev_port_cntr(dd, entry, sval, dd, vl);
12436 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12438 struct cntr_entry *entry;
12441 entry = &dev_cntrs[index];
12442 sval = dd->scntrs + entry->offset;
12444 if (vl != CNTR_INVALID_VL)
12447 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12450 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12452 struct cntr_entry *entry;
12455 entry = &port_cntrs[index];
12456 sval = ppd->scntrs + entry->offset;
12458 if (vl != CNTR_INVALID_VL)
12461 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12462 (index <= C_RCV_HDR_OVF_LAST)) {
12463 /* We do not want to bother for disabled contexts */
12467 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12470 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12472 struct cntr_entry *entry;
12475 entry = &port_cntrs[index];
12476 sval = ppd->scntrs + entry->offset;
12478 if (vl != CNTR_INVALID_VL)
12481 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12482 (index <= C_RCV_HDR_OVF_LAST)) {
12483 /* We do not want to bother for disabled contexts */
12487 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12490 static void do_update_synth_timer(struct work_struct *work)
12497 struct hfi1_pportdata *ppd;
12498 struct cntr_entry *entry;
12499 struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12503 * Rather than keep beating on the CSRs pick a minimal set that we can
12504 * check to watch for potential roll over. We can do this by looking at
12505 * the number of flits sent/recv. If the total flits exceeds 32bits then
12506 * we have to iterate all the counters and update.
12508 entry = &dev_cntrs[C_DC_RCV_FLITS];
12509 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12511 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12512 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12516 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12517 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12519 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12521 * May not be strictly necessary to update but it won't hurt and
12522 * simplifies the logic here.
12525 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12528 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12530 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12531 total_flits, (u64)CNTR_32BIT_MAX);
12532 if (total_flits >= CNTR_32BIT_MAX) {
12533 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12540 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12541 for (i = 0; i < DEV_CNTR_LAST; i++) {
12542 entry = &dev_cntrs[i];
12543 if (entry->flags & CNTR_VL) {
12544 for (vl = 0; vl < C_VL_COUNT; vl++)
12545 read_dev_cntr(dd, i, vl);
12547 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12550 ppd = (struct hfi1_pportdata *)(dd + 1);
12551 for (i = 0; i < dd->num_pports; i++, ppd++) {
12552 for (j = 0; j < PORT_CNTR_LAST; j++) {
12553 entry = &port_cntrs[j];
12554 if (entry->flags & CNTR_VL) {
12555 for (vl = 0; vl < C_VL_COUNT; vl++)
12556 read_port_cntr(ppd, j, vl);
12558 read_port_cntr(ppd, j, CNTR_INVALID_VL);
12564 * We want the value in the register. The goal is to keep track
12565 * of the number of "ticks", not the counter value. In other
12566 * words, if the register rolls we want to notice it and go ahead
12567 * and force an update.
12569 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12570 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12573 entry = &dev_cntrs[C_DC_RCV_FLITS];
12574 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12577 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12578 dd->unit, dd->last_tx, dd->last_rx);
12581 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
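/*
 * Illustrative sketch (not driver code) of the "tripwire" test above:
 * a full counter sweep is forced when either flit counter appears to
 * move backwards (it rolled) or the combined delta since the last
 * sweep crosses the 32-bit limit. The helper name is hypothetical.
 */
static bool need_full_cntr_update(u64 cur_tx, u64 cur_rx,
				  u64 last_tx, u64 last_rx)
{
	if (cur_tx < last_tx || cur_rx < last_rx)
		return true;	/* a tripwire counter rolled */
	return (cur_tx - last_tx) + (cur_rx - last_rx) >= CNTR_32BIT_MAX;
}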
12585 static void update_synth_timer(struct timer_list *t)
12587 struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
12589 queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12590 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12593 #define C_MAX_NAME 16 /* 15 chars + one for '\0' */
12594 static int init_cntrs(struct hfi1_devdata *dd)
12596 int i, rcv_ctxts, j;
12599 char name[C_MAX_NAME];
12600 struct hfi1_pportdata *ppd;
12601 const char *bit_type_32 = ",32";
12602 const int bit_type_32_sz = strlen(bit_type_32);
12603 u32 sdma_engines = chip_sdma_engines(dd);
12605 /* set up the stats timer; it is started (mod_timer) at the end */
12606 timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
12608 /***********************/
12609 /* per device counters */
12610 /***********************/
12612 /* size names and determine how many we have */
12616 for (i = 0; i < DEV_CNTR_LAST; i++) {
12617 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12618 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12622 if (dev_cntrs[i].flags & CNTR_VL) {
12623 dev_cntrs[i].offset = dd->ndevcntrs;
12624 for (j = 0; j < C_VL_COUNT; j++) {
12625 snprintf(name, C_MAX_NAME, "%s%d",
12626 dev_cntrs[i].name, vl_from_idx(j));
12627 sz += strlen(name);
12628 /* Add ",32" for 32-bit counters */
12629 if (dev_cntrs[i].flags & CNTR_32BIT)
12630 sz += bit_type_32_sz;
12634 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12635 dev_cntrs[i].offset = dd->ndevcntrs;
12636 for (j = 0; j < sdma_engines; j++) {
12637 snprintf(name, C_MAX_NAME, "%s%d",
12638 dev_cntrs[i].name, j);
12639 sz += strlen(name);
12640 /* Add ",32" for 32-bit counters */
12641 if (dev_cntrs[i].flags & CNTR_32BIT)
12642 sz += bit_type_32_sz;
12647 /* +1 for newline. */
12648 sz += strlen(dev_cntrs[i].name) + 1;
12649 /* Add ",32" for 32-bit counters */
12650 if (dev_cntrs[i].flags & CNTR_32BIT)
12651 sz += bit_type_32_sz;
12652 dev_cntrs[i].offset = dd->ndevcntrs;
12657 /* allocate space for the counter values */
12658 dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
12663 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12667 /* allocate space for the counter names */
12668 dd->cntrnameslen = sz;
12669 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12670 if (!dd->cntrnames)
12673 /* fill in the names */
12674 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12675 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12677 } else if (dev_cntrs[i].flags & CNTR_VL) {
12678 for (j = 0; j < C_VL_COUNT; j++) {
12679 snprintf(name, C_MAX_NAME, "%s%d",
12680 	dev_cntrs[i].name, vl_from_idx(j));
12682 memcpy(p, name, strlen(name));
12685 /* Counter is 32 bits */
12686 if (dev_cntrs[i].flags & CNTR_32BIT) {
12687 memcpy(p, bit_type_32, bit_type_32_sz);
12688 p += bit_type_32_sz;
12693 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12694 for (j = 0; j < sdma_engines; j++) {
12695 snprintf(name, C_MAX_NAME, "%s%d",
12696 dev_cntrs[i].name, j);
12697 memcpy(p, name, strlen(name));
12700 /* Counter is 32 bits */
12701 if (dev_cntrs[i].flags & CNTR_32BIT) {
12702 memcpy(p, bit_type_32, bit_type_32_sz);
12703 p += bit_type_32_sz;
12709 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12710 p += strlen(dev_cntrs[i].name);
12712 /* Counter is 32 bits */
12713 if (dev_cntrs[i].flags & CNTR_32BIT) {
12714 memcpy(p, bit_type_32, bit_type_32_sz);
12715 p += bit_type_32_sz;
12722 /*********************/
12723 /* per port counters */
12724 /*********************/
12727 * Go through the counters for the overflows and disable the ones we
12728 * don't need. This varies based on platform so we need to do it
12729 * dynamically here.
12731 rcv_ctxts = dd->num_rcv_contexts;
12732 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12733 i <= C_RCV_HDR_OVF_LAST; i++) {
12734 port_cntrs[i].flags |= CNTR_DISABLED;
12737 /* size port counter names and determine how many we have */
12739 dd->nportcntrs = 0;
12740 for (i = 0; i < PORT_CNTR_LAST; i++) {
12741 if (port_cntrs[i].flags & CNTR_DISABLED) {
12742 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12746 if (port_cntrs[i].flags & CNTR_VL) {
12747 port_cntrs[i].offset = dd->nportcntrs;
12748 for (j = 0; j < C_VL_COUNT; j++) {
12749 snprintf(name, C_MAX_NAME, "%s%d",
12750 port_cntrs[i].name, vl_from_idx(j));
12751 sz += strlen(name);
12752 /* Add ",32" for 32-bit counters */
12753 if (port_cntrs[i].flags & CNTR_32BIT)
12754 sz += bit_type_32_sz;
12759 /* +1 for newline */
12760 sz += strlen(port_cntrs[i].name) + 1;
12761 /* Add ",32" for 32-bit counters */
12762 if (port_cntrs[i].flags & CNTR_32BIT)
12763 sz += bit_type_32_sz;
12764 port_cntrs[i].offset = dd->nportcntrs;
12769 /* allocate space for the counter names */
12770 dd->portcntrnameslen = sz;
12771 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12772 if (!dd->portcntrnames)
12775 /* fill in port cntr names */
12776 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12777 if (port_cntrs[i].flags & CNTR_DISABLED)
12780 if (port_cntrs[i].flags & CNTR_VL) {
12781 for (j = 0; j < C_VL_COUNT; j++) {
12782 snprintf(name, C_MAX_NAME, "%s%d",
12783 port_cntrs[i].name, vl_from_idx(j));
12784 memcpy(p, name, strlen(name));
12787 /* Counter is 32 bits */
12788 if (port_cntrs[i].flags & CNTR_32BIT) {
12789 memcpy(p, bit_type_32, bit_type_32_sz);
12790 p += bit_type_32_sz;
12796 memcpy(p, port_cntrs[i].name,
12797 strlen(port_cntrs[i].name));
12798 p += strlen(port_cntrs[i].name);
12800 /* Counter is 32 bits */
12801 if (port_cntrs[i].flags & CNTR_32BIT) {
12802 memcpy(p, bit_type_32, bit_type_32_sz);
12803 p += bit_type_32_sz;
12810 /* allocate per port storage for counter values */
12811 ppd = (struct hfi1_pportdata *)(dd + 1);
12812 for (i = 0; i < dd->num_pports; i++, ppd++) {
12813 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12817 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12822 /* CPU counters need to be allocated and zeroed */
12823 if (init_cpu_counters(dd))
12826 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12827 WQ_MEM_RECLAIM, dd->unit);
12828 if (!dd->update_cntr_wq)
12831 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12833 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
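/*
 * Illustrative sketch (not driver code) of the two-pass pattern used
 * throughout init_cntrs() above: walk the table once to size the name
 * buffer exactly, then walk it again to fill it, so no reallocation is
 * needed. Names and types here are hypothetical.
 */
static size_t size_cntr_names(const char *const names[], int n)
{
	size_t sz = 0;
	int i;

	for (i = 0; i < n; i++)
		sz += strlen(names[i]) + 1;	/* +1 for the newline */
	return sz;
}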
12840 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12842 switch (chip_lstate) {
12844 return IB_PORT_DOWN;
12846 return IB_PORT_INIT;
12848 return IB_PORT_ARMED;
12849 case LSTATE_ACTIVE:
12850 return IB_PORT_ACTIVE;
12853 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12855 return IB_PORT_DOWN;
12859 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12861 /* look at the HFI meta-states only */
12862 switch (chip_pstate & 0xf0) {
12864 return IB_PORTPHYSSTATE_DISABLED;
12866 return OPA_PORTPHYSSTATE_OFFLINE;
12868 return IB_PORTPHYSSTATE_POLLING;
12869 case PLS_CONFIGPHY:
12870 return IB_PORTPHYSSTATE_TRAINING;
12872 return IB_PORTPHYSSTATE_LINKUP;
12874 return IB_PORTPHYSSTATE_PHY_TEST;
12876 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12878 return IB_PORTPHYSSTATE_DISABLED;
12882 /* return the OPA port logical state name */
12883 const char *opa_lstate_name(u32 lstate)
12885 static const char * const port_logical_names[] = {
12891 "PORT_ACTIVE_DEFER",
12893 if (lstate < ARRAY_SIZE(port_logical_names))
12894 return port_logical_names[lstate];
12898 /* return the OPA port physical state name */
12899 const char *opa_pstate_name(u32 pstate)
12901 static const char * const port_physical_names[] = {
12908 "PHYS_LINK_ERR_RECOVER",
12915 if (pstate < ARRAY_SIZE(port_physical_names))
12916 return port_physical_names[pstate];
12921 * update_statusp - Update userspace status flag
12922 * @ppd: Port data structure
12923 * @state: port state information
12925 * Actual port status is determined by the host_link_state value
12926 * in the ppd.
12928 * host_link_state MUST be updated before updating the user space
12929 * statusp.
12931 static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
12934 * Set port status flags in the page mapped into userspace
12935 * memory. Do it here to ensure a reliable state - this is
12936 * the only function called by all state handling code.
12937 * Always set the flags because the cached value
12938 * might have been changed explicitly outside of this function.
12941 if (ppd->statusp) {
12945 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12946 HFI1_STATUS_IB_READY);
12948 case IB_PORT_ARMED:
12949 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12951 case IB_PORT_ACTIVE:
12952 *ppd->statusp |= HFI1_STATUS_IB_READY;
12956 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12957 opa_lstate_name(state), state);
12961 * wait_logical_linkstate - wait for an IB link state change to occur
12962 * @ppd: port device
12963 * @state: the state to wait for
12964 * @msecs: the number of milliseconds to wait
12966 * Wait up to msecs milliseconds for IB link state change to occur.
12967 * For now, take the easy polling route.
12968 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12970 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12973 unsigned long timeout;
12976 timeout = jiffies + msecs_to_jiffies(msecs);
12978 new_state = chip_to_opa_lstate(ppd->dd,
12979 read_logical_state(ppd->dd));
12980 if (new_state == state)
12982 if (time_after(jiffies, timeout)) {
12983 dd_dev_err(ppd->dd,
12984 "timeout waiting for link state 0x%x\n",
12994 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
12996 u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
12998 dd_dev_info(ppd->dd,
12999 "physical state changed to %s (0x%x), phy 0x%x\n",
13000 opa_pstate_name(ib_pstate), ib_pstate, state);
13004 * Read the physical hardware link state and check if it matches the
13005 * host driver's anticipated state.
13007 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
13009 u32 read_state = read_physical_state(ppd->dd);
13011 if (read_state == state) {
13012 log_state_transition(ppd, state);
13014 dd_dev_err(ppd->dd,
13015 "anticipated phy link state 0x%x, read 0x%x\n",
13016 state, read_state);
13021 * wait_physical_linkstate - wait for a physical link state change to occur
13022 * @ppd: port device
13023 * @state: the state to wait for
13024 * @msecs: the number of milliseconds to wait
13026 * Wait up to msecs milliseconds for physical link state change to occur.
13027 * Returns 0 if state reached, otherwise -ETIMEDOUT.
13029 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
13033 unsigned long timeout;
13035 timeout = jiffies + msecs_to_jiffies(msecs);
13037 read_state = read_physical_state(ppd->dd);
13038 if (read_state == state)
13040 if (time_after(jiffies, timeout)) {
13041 dd_dev_err(ppd->dd,
13042 "timeout waiting for phy link state 0x%x\n",
13046 usleep_range(1950, 2050); /* sleep 2ms-ish */
13049 log_state_transition(ppd, state);
13054 * wait_phys_link_offline_substates - wait for any offline substate
13055 * @ppd: port device
13056 * @msecs: the number of milliseconds to wait
13058 * Wait up to msecs milliseconds for any offline physical link
13059 * state change to occur.
13060 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
13062 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
13066 unsigned long timeout;
13068 timeout = jiffies + msecs_to_jiffies(msecs);
13070 read_state = read_physical_state(ppd->dd);
13071 if ((read_state & 0xF0) == PLS_OFFLINE)
13073 if (time_after(jiffies, timeout)) {
13074 dd_dev_err(ppd->dd,
13075 "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
13076 read_state, msecs);
13079 usleep_range(1950, 2050); /* sleep 2ms-ish */
13082 log_state_transition(ppd, read_state);
13087 * wait_phys_link_out_of_offline - wait for any out of offline state
13088 * @ppd: port device
13089 * @msecs: the number of milliseconds to wait
13091 * Wait up to msecs milliseconds for any out of offline physical link
13092 * state change to occur.
13093 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
13095 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
13099 unsigned long timeout;
13101 timeout = jiffies + msecs_to_jiffies(msecs);
13103 read_state = read_physical_state(ppd->dd);
13104 if ((read_state & 0xF0) != PLS_OFFLINE)
13106 if (time_after(jiffies, timeout)) {
13107 dd_dev_err(ppd->dd,
13108 "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
13109 read_state, msecs);
13112 usleep_range(1950, 2050); /* sleep 2ms-ish */
13115 log_state_transition(ppd, read_state);
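/*
 * Illustrative sketch (not driver code): the wait_* helpers above all
 * share one poll-with-deadline shape, shown generically here. The
 * helper and its callback type are hypothetical.
 */
static int poll_until(bool (*done)(struct hfi1_pportdata *ppd, u32 arg),
		      struct hfi1_pportdata *ppd, u32 arg, int msecs)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(msecs);

	while (!done(ppd, arg)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(1950, 2050);	/* sleep 2ms-ish */
	}
	return 0;
}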
13119 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
13120 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
13122 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
13123 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
13125 void hfi1_init_ctxt(struct send_context *sc)
13128 struct hfi1_devdata *dd = sc->dd;
13130 u8 set = (sc->type == SC_USER ?
13131 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
13132 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
13133 reg = read_kctxt_csr(dd, sc->hw_context,
13134 SEND_CTXT_CHECK_ENABLE);
13136 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
13138 SET_STATIC_RATE_CONTROL_SMASK(reg);
13139 write_kctxt_csr(dd, sc->hw_context,
13140 SEND_CTXT_CHECK_ENABLE, reg);
13144 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
13149 if (dd->icode != ICODE_RTL_SILICON) {
13150 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
13151 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
13155 reg = read_csr(dd, ASIC_STS_THERM);
13156 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
13157 ASIC_STS_THERM_CURR_TEMP_MASK);
13158 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
13159 ASIC_STS_THERM_LO_TEMP_MASK);
13160 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
13161 ASIC_STS_THERM_HI_TEMP_MASK);
13162 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
13163 ASIC_STS_THERM_CRIT_TEMP_MASK);
13164 /* triggers is a 3-bit value - 1 bit per trigger. */
13165 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
13170 /* ========================================================================= */
13173 * read_mod_write() - Calculate the IRQ register index and set/clear the bits
13174 * @dd: valid devdata
13175 * @src: IRQ source to determine register index from
13176 * @bits: the bits to set or clear
13177 * @set: true == set the bits, false == clear the bits
13180 static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
13184 u16 idx = src / BITS_PER_REGISTER;
13186 spin_lock(&dd->irq_src_lock);
13187 reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
13192 write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
13193 spin_unlock(&dd->irq_src_lock);
13197 * set_intr_bits() - Enable/disable a range (one or more) IRQ sources
13198 * @dd: valid devdata
13199 * @first: first IRQ source to set/clear
13200 * @last: last IRQ source (inclusive) to set/clear
13201 * @set: true == set the bits, false == clear the bits
13203 * If first == last, set the exact source.
13205 int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
13211 if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES)
13217 for (src = first; src <= last; src++) {
13218 bit = src % BITS_PER_REGISTER;
13219 /* wrapped to next register? */
13220 if (!bit && bits) {
13221 read_mod_write(dd, src - 1, bits, set);
13224 bits |= BIT_ULL(bit);
13226 read_mod_write(dd, last, bits, set);
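/*
 * Usage illustration (hypothetical call, not driver code), assuming
 * BITS_PER_REGISTER == 64: enabling sources 60..66 makes the loop
 * above emit two read-modify-writes - bits 60-63 of the first mask
 * CSR and bits 0-2 of the second.
 */
static void example_enable_span(struct hfi1_devdata *dd)
{
	set_intr_bits(dd, 60, 66, true);	/* spans two CSRs */
}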
13232 * Clear all interrupt sources on the chip.
13234 void clear_all_interrupts(struct hfi1_devdata *dd)
13238 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13239 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
13241 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
13242 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
13243 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
13244 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
13245 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
13246 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
13247 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
13248 for (i = 0; i < chip_send_contexts(dd); i++)
13249 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
13250 for (i = 0; i < chip_sdma_engines(dd); i++)
13251 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
13253 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
13254 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
13255 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
13259 * Remap the interrupt source from the general handler to the given MSI-X interrupt.
13262 void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
13267 /* clear from the handled mask of the general interrupt */
13270 if (likely(m < CCE_NUM_INT_CSRS)) {
13271 dd->gi_mask[m] &= ~((u64)1 << n);
13273 dd_dev_err(dd, "remap interrupt err\n");
13277 /* direct the chip source to the given MSI-X interrupt */
13280 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
13281 reg &= ~((u64)0xff << (8 * n));
13282 reg |= ((u64)msix_intr & 0xff) << (8 * n);
13283 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
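/*
 * Illustrative sketch (not driver code): CCE_INT_MAP packs one 8-bit
 * MSI-X vector number per interrupt source, eight sources per 64-bit
 * CSR - hence the byte-wide clear/set with 8 * n above. This helper
 * is a hypothetical restatement of that byte update.
 */
static u64 int_map_set_byte(u64 reg, int n, u8 msix_intr)
{
	reg &= ~((u64)0xff << (8 * n));		/* clear the source's byte */
	reg |= (u64)msix_intr << (8 * n);	/* install the new vector */
	return reg;
}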
13286 void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
13289 * SDMA engine interrupt sources grouped by type, rather than
13290 * engine. Per-engine interrupts are as follows:
13291 *	SDMA
13292 *	SDMAProgress
13293 *	SDMAIdle
13295 remap_intr(dd, IS_SDMA_START + engine, msix_intr);
13296 remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
13297 remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
13301 * Set the general handler to accept all interrupts, remap all
13302 * chip interrupts back to MSI-X 0.
13304 void reset_interrupts(struct hfi1_devdata *dd)
13308 /* all interrupts handled by the general handler */
13309 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13310 dd->gi_mask[i] = ~(u64)0;
13312 /* all chip interrupts map to MSI-X 0 */
13313 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13314 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13318 * set_up_interrupts() - Initialize the IRQ resources and state
13319 * @dd: valid devdata
13322 static int set_up_interrupts(struct hfi1_devdata *dd)
13326 /* mask all interrupts */
13327 set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
13329 /* clear all pending interrupts */
13330 clear_all_interrupts(dd);
13332 /* reset general handler mask, chip MSI-X mappings */
13333 reset_interrupts(dd);
13335 /* ask for MSI-X interrupts */
13336 ret = msix_initialize(dd);
13340 ret = msix_request_irqs(dd);
13342 msix_clean_up_interrupts(dd);
13348 * Set up context values in dd. Sets:
13350 * num_rcv_contexts - number of contexts being used
13351 * n_krcv_queues - number of kernel contexts
13352 * first_dyn_alloc_ctxt - first dynamically allocated context
13353 * in array of contexts
13354 * freectxts - number of free user contexts
13355 * num_send_contexts - number of PIO send contexts being used
13356 * num_netdev_contexts - number of contexts reserved for netdev
13358 static int set_up_context_variables(struct hfi1_devdata *dd)
13360 unsigned long num_kernel_contexts;
13361 u16 num_netdev_contexts;
13366 u32 send_contexts = chip_send_contexts(dd);
13367 u32 rcv_contexts = chip_rcv_contexts(dd);
13370 * Kernel receive contexts:
13371 * - Context 0 - control context (VL15/multicast/error)
13372 * - Context 1 - first kernel context
13373 * - Context 2 - second kernel context
13378 * n_krcvqs is the sum of module parameter kernel receive
13379 * contexts, krcvqs[]. It does not include the control
13380 * context, so add that.
13382 num_kernel_contexts = n_krcvqs + 1;
13384 num_kernel_contexts = DEFAULT_KRCVQS + 1;
13386 * Every kernel receive context needs an ACK send context.
13387 * One send context is allocated for each VL{0-7} and VL15.
13389 if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
13391 "Reducing # kernel rcv contexts to: %d, from %lu\n",
13392 send_contexts - num_vls - 1,
13393 num_kernel_contexts);
13394 num_kernel_contexts = send_contexts - num_vls - 1;
13399 * - default to 1 user context per real (non-HT) CPU core if
13400 * num_user_contexts is negative
13402 if (num_user_contexts < 0)
13403 n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
13405 n_usr_ctxts = num_user_contexts;
13407 * Adjust the counts given a global max.
13409 if (num_kernel_contexts + n_usr_ctxts > rcv_contexts) {
13411 "Reducing # user receive contexts to: %u, from %u\n",
13412 (u32)(rcv_contexts - num_kernel_contexts),
13415 n_usr_ctxts = rcv_contexts - num_kernel_contexts;
13418 num_netdev_contexts =
13419 hfi1_num_netdev_contexts(dd, rcv_contexts -
13420 (num_kernel_contexts + n_usr_ctxts),
13421 &node_affinity.real_cpu_mask);
13423 * RMT entries are allocated as follows:
13424 * 1. QOS (0 to 128 entries)
13425 * 2. FECN (num_kernel_contexts - 1 [a] + num_user_contexts +
13426 * num_netdev_contexts [b])
13427 * 3. netdev (NUM_NETDEV_MAP_ENTRIES)
13430 * [a] Kernel contexts (except control) are included in FECN if kernel
13431 * TID_RDMA is active.
13432 * [b] Netdev and user contexts are randomly allocated from the same
13433 * context pool, so FECN must cover all contexts in the pool.
13435 rmt_count = qos_rmt_entries(num_kernel_contexts - 1, NULL, NULL)
13436 + (HFI1_CAP_IS_KSET(TID_RDMA) ? num_kernel_contexts - 1
13439 + num_netdev_contexts
13440 + NUM_NETDEV_MAP_ENTRIES;
13441 if (rmt_count > NUM_MAP_ENTRIES) {
13442 int over = rmt_count - NUM_MAP_ENTRIES;
13443 /* try to squish user contexts, minimum of 1 */
13444 if (over >= n_usr_ctxts) {
13445 dd_dev_err(dd, "RMT overflow: reduce the requested number of contexts\n");
13448 dd_dev_err(dd, "RMT overflow: reducing # user contexts from %u to %u\n",
13449 n_usr_ctxts, n_usr_ctxts - over);
13450 n_usr_ctxts -= over;
13453 /* the first N are kernel contexts, the rest are user/netdev contexts */
13454 dd->num_rcv_contexts =
13455 num_kernel_contexts + n_usr_ctxts + num_netdev_contexts;
13456 dd->n_krcv_queues = num_kernel_contexts;
13457 dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13458 dd->num_netdev_contexts = num_netdev_contexts;
13459 dd->num_user_contexts = n_usr_ctxts;
13460 dd->freectxts = n_usr_ctxts;
13462 "rcv contexts: chip %d, used %d (kernel %d, netdev %u, user %u)\n",
13464 (int)dd->num_rcv_contexts,
13465 (int)dd->n_krcv_queues,
13466 dd->num_netdev_contexts,
13467 dd->num_user_contexts);
13470 * Receive array allocation:
13471 * All RcvArray entries are divided into groups of 8. This
13472 * is required by the hardware and will speed up writes to
13473 * consecutive entries by using write-combining of the entire cacheline.
13476 * The number of groups is evenly divided among all contexts;
13477 * any left-over groups are given to the first N user contexts.
13480 dd->rcv_entries.group_size = RCV_INCREMENT;
13481 ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
13482 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13483 dd->rcv_entries.nctxt_extra = ngroups -
13484 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13485 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13486 dd->rcv_entries.ngroups,
13487 dd->rcv_entries.nctxt_extra);
13488 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13489 MAX_EAGER_ENTRIES * 2) {
13490 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13491 dd->rcv_entries.group_size;
13493 "RcvArray group count too high, change to %u\n",
13494 dd->rcv_entries.ngroups);
13495 dd->rcv_entries.nctxt_extra = 0;
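/*
 * Worked example (illustrative numbers only): with 2048 RcvArray
 * entries, group_size 8 and 40 receive contexts, ngroups = 256,
 * each context gets 256 / 40 = 6 groups, and the 256 - 40 * 6 = 16
 * left-over groups go to the first user contexts.
 */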
13498 * PIO send contexts
13500 ret = init_sc_pools_and_sizes(dd);
13501 if (ret >= 0) { /* success */
13502 dd->num_send_contexts = ret;
13505 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13507 dd->num_send_contexts,
13508 dd->sc_sizes[SC_KERNEL].count,
13509 dd->sc_sizes[SC_ACK].count,
13510 dd->sc_sizes[SC_USER].count,
13511 dd->sc_sizes[SC_VL15].count);
13512 ret = 0; /* success */
13519 * Set the device/port partition key table. The MAD code
13520 * will ensure that, at least, the partial management
13521 * partition key is present in the table.
13523 static void set_partition_keys(struct hfi1_pportdata *ppd)
13525 struct hfi1_devdata *dd = ppd->dd;
13529 dd_dev_info(dd, "Setting partition keys\n");
13530 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13531 reg |= (ppd->pkeys[i] &
13532 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13534 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13535 /* Each register holds 4 PKey values. */
13536 if ((i % 4) == 3) {
13537 write_csr(dd, RCV_PARTITION_KEY +
13538 ((i - 3) * 2), reg);
13543 /* Always enable HW pkeys check when pkeys table is set */
13544 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13548 * These CSRs and memories are uninitialized on reset and must be
13549 * written before reading to set the ECC/parity bits.
13551 * NOTE: All user context CSRs that are not mmapped write-only
13552 * (e.g. the TID flows) must be initialized even if the driver never
13555 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13560 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13561 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13563 /* SendCtxtCreditReturnAddr */
13564 for (i = 0; i < chip_send_contexts(dd); i++)
13565 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13567 /* PIO Send buffers */
13568 /* SDMA Send buffers */
13570 * These are not normally read, and (presently) have no method
13571 * to be read, so are not pre-initialized
13575 /* RcvHdrTailAddr */
13576 /* RcvTidFlowTable */
13577 for (i = 0; i < chip_rcv_contexts(dd); i++) {
13578 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13579 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13580 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13581 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13585 for (i = 0; i < chip_rcv_array_count(dd); i++)
13586 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13588 /* RcvQPMapTable */
13589 for (i = 0; i < 32; i++)
13590 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13594 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13596 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13599 unsigned long timeout;
13602 /* is the condition present? */
13603 reg = read_csr(dd, CCE_STATUS);
13604 if ((reg & status_bits) == 0)
13607 /* clear the condition */
13608 write_csr(dd, CCE_CTRL, ctrl_bits);
13610 /* wait for the condition to clear */
13611 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13613 reg = read_csr(dd, CCE_STATUS);
13614 if ((reg & status_bits) == 0)
13616 if (time_after(jiffies, timeout)) {
13618 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13619 status_bits, reg & status_bits);
13626 /* set CCE CSRs to chip reset defaults */
13627 static void reset_cce_csrs(struct hfi1_devdata *dd)
13631 /* CCE_REVISION read-only */
13632 /* CCE_REVISION2 read-only */
13633 /* CCE_CTRL - bits clear automatically */
13634 /* CCE_STATUS read-only, use CceCtrl to clear */
13635 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13636 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13637 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13638 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13639 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13640 /* CCE_ERR_STATUS read-only */
13641 write_csr(dd, CCE_ERR_MASK, 0);
13642 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13643 /* CCE_ERR_FORCE leave alone */
13644 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13645 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13646 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13647 /* CCE_PCIE_CTRL leave alone */
13648 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13649 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13650 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13651 CCE_MSIX_TABLE_UPPER_RESETCSR);
13653 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13654 /* CCE_MSIX_PBA read-only */
13655 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13656 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13658 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13659 write_csr(dd, CCE_INT_MAP, 0);
13660 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13661 /* CCE_INT_STATUS read-only */
13662 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13663 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13664 /* CCE_INT_FORCE leave alone */
13665 /* CCE_INT_BLOCKED read-only */
13667 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13668 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13671 /* set MISC CSRs to chip reset defaults */
13672 static void reset_misc_csrs(struct hfi1_devdata *dd)
13676 for (i = 0; i < 32; i++) {
13677 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13678 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13679 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13682 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13683 * only be written in 128-byte chunks
13685 /* init RSA engine to clear lingering errors */
13686 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13687 write_csr(dd, MISC_CFG_RSA_MU, 0);
13688 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13689 /* MISC_STS_8051_DIGEST read-only */
13690 /* MISC_STS_SBM_DIGEST read-only */
13691 /* MISC_STS_PCIE_DIGEST read-only */
13692 /* MISC_STS_FAB_DIGEST read-only */
13693 /* MISC_ERR_STATUS read-only */
13694 write_csr(dd, MISC_ERR_MASK, 0);
13695 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13696 /* MISC_ERR_FORCE leave alone */
13699 /* set TXE CSRs to chip reset defaults */
13700 static void reset_txe_csrs(struct hfi1_devdata *dd)
13707 write_csr(dd, SEND_CTRL, 0);
13708 __cm_reset(dd, 0); /* reset CM internal state */
13709 /* SEND_CONTEXTS read-only */
13710 /* SEND_DMA_ENGINES read-only */
13711 /* SEND_PIO_MEM_SIZE read-only */
13712 /* SEND_DMA_MEM_SIZE read-only */
13713 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13714 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13715 /* SEND_PIO_ERR_STATUS read-only */
13716 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13717 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13718 /* SEND_PIO_ERR_FORCE leave alone */
13719 /* SEND_DMA_ERR_STATUS read-only */
13720 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13721 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13722 /* SEND_DMA_ERR_FORCE leave alone */
13723 /* SEND_EGRESS_ERR_STATUS read-only */
13724 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13725 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13726 /* SEND_EGRESS_ERR_FORCE leave alone */
13727 write_csr(dd, SEND_BTH_QP, 0);
13728 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13729 write_csr(dd, SEND_SC2VLT0, 0);
13730 write_csr(dd, SEND_SC2VLT1, 0);
13731 write_csr(dd, SEND_SC2VLT2, 0);
13732 write_csr(dd, SEND_SC2VLT3, 0);
13733 write_csr(dd, SEND_LEN_CHECK0, 0);
13734 write_csr(dd, SEND_LEN_CHECK1, 0);
13735 /* SEND_ERR_STATUS read-only */
13736 write_csr(dd, SEND_ERR_MASK, 0);
13737 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13738 /* SEND_ERR_FORCE read-only */
13739 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13740 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13741 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13742 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13743 for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
13744 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13745 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13746 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13747 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13748 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13749 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13750 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13751 /* SEND_CM_CREDIT_USED_STATUS read-only */
13752 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13753 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13754 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13755 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13756 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13757 for (i = 0; i < TXE_NUM_DATA_VL; i++)
13758 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13759 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13760 /* SEND_CM_CREDIT_USED_VL read-only */
13761 /* SEND_CM_CREDIT_USED_VL15 read-only */
13762 /* SEND_EGRESS_CTXT_STATUS read-only */
13763 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13764 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13765 /* SEND_EGRESS_ERR_INFO read-only */
13766 /* SEND_EGRESS_ERR_SOURCE read-only */
13769 * TXE Per-Context CSRs
13771 for (i = 0; i < chip_send_contexts(dd); i++) {
13772 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13773 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13774 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13775 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13776 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13777 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13778 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13779 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13780 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13781 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13782 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13783 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13787 * TXE Per-SDMA CSRs
13789 for (i = 0; i < chip_sdma_engines(dd); i++) {
13790 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13791 /* SEND_DMA_STATUS read-only */
13792 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13793 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13794 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13795 /* SEND_DMA_HEAD read-only */
13796 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13797 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13798 /* SEND_DMA_IDLE_CNT read-only */
13799 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13800 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13801 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13802 /* SEND_DMA_ENG_ERR_STATUS read-only */
13803 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13804 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13805 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13806 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13807 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13808 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13809 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13810 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13811 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13812 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13817 * Expects:
13818 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13820 static void init_rbufs(struct hfi1_devdata *dd)
13826 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13831 reg = read_csr(dd, RCV_STATUS);
13832 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13833 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13836 * Give up after 1ms - maximum wait time.
13838 * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at
13839 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13840 * 136 KB / (66% * 250MB/s) = 844us
13842 if (count++ > 500) {
13844 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13848 udelay(2); /* do not busy-wait the CSR */
13851 /* start the init - expect RcvCtrl to be 0 */
13852 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13855 * Read to force the write of Rcvtrl.RxRbufInit. There is a brief
13856 * period after the write before RcvStatus.RxRbufInitDone is valid.
13857 * The delay in the first run through the loop below is sufficient and
13858 * required before the first read of RcvStatus.RxRbufInintDone.
13860 read_csr(dd, RCV_CTRL);
13862 /* wait for the init to finish */
13865 /* delay is required first time through - see above */
13866 udelay(2); /* do not busy-wait the CSR */
13867 reg = read_csr(dd, RCV_STATUS);
13868 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13871 /* give up after 100us - slowest possible at 33MHz is 73us */
13872 if (count++ > 50) {
13874 "%s: RcvStatus.RxRbufInit not set, continuing\n",
/* set RXE CSRs to chip reset defaults */
static void reset_rxe_csrs(struct hfi1_devdata *dd)
{
	int i, j;

	/*
	 * RXE Kernel CSRs
	 */
	write_csr(dd, RCV_CTRL, 0);
	init_rbufs(dd);
	/* RCV_STATUS read-only */
	/* RCV_CONTEXTS read-only */
	/* RCV_ARRAY_CNT read-only */
	/* RCV_BUF_SIZE read-only */
	write_csr(dd, RCV_BTH_QP, 0);
	write_csr(dd, RCV_MULTICAST, 0);
	write_csr(dd, RCV_BYPASS, 0);
	write_csr(dd, RCV_VL15, 0);
	/* this is a clear-down */
	write_csr(dd, RCV_ERR_INFO,
		  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
	/* RCV_ERR_STATUS read-only */
	write_csr(dd, RCV_ERR_MASK, 0);
	write_csr(dd, RCV_ERR_CLEAR, ~0ull);
	/* RCV_ERR_FORCE leave alone */
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
	for (i = 0; i < 4; i++)
		write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
	for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
		write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
	for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
		write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
	for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
		clear_rsm_rule(dd, i);
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
	/*
	 * RXE Kernel and User Per-Context CSRs
	 */
	for (i = 0; i < chip_rcv_contexts(dd); i++) {
		/* kernel */
		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
		/* RCV_CTXT_STATUS read-only */
		write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
		write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
		write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
		write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);

		/* user */
		/* RCV_HDR_TAIL read-only */
		write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
		/* RCV_EGR_INDEX_TAIL read-only */
		write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
		/* RCV_EGR_OFFSET_TAIL read-only */
		for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
			write_uctxt_csr(dd, i,
					RCV_TID_FLOW_TABLE + (8 * j), 0);
		}
	}
}
/*
 * Set sc2vl tables.
 *
 * They power on to zeros, so to avoid send context errors
 * they need to be set:
 *
 * SC 0-7 -> VL 0-7 (respectively)
 * SC 15  -> VL 15
 * otherwise
 *        -> VL 0
 */
static void init_sc2vl_tables(struct hfi1_devdata *dd)
{
	int i;
	/* init per architecture spec, constrained by hardware capability */

	/* HFI maps sent packets */
	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
		  0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7));
	write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
		  1, 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
	write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
		  2, 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0));
	write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
		  3, 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));

	/* DC maps received packets */
	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
		  15_0,
		  0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
		  8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
		  31_16,
		  16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
		  24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));

	/* initialize the cached sc2vl values consistently with h/w */
	for (i = 0; i < 32; i++) {
		if (i < 8 || i == 15)
			*((u8 *)(dd->sc2vl) + i) = (u8)i;
		else
			*((u8 *)(dd->sc2vl) + i) = 0;
	}
}
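/*
 * For reference, the cached table after init_sc2vl_tables() reads:
 * sc2vl[0..7] = 0..7, sc2vl[15] = 15, and every other SC maps to VL 0,
 * mirroring the SEND_SC2VLT and DCC_CFG_SC_VL_TABLE programming above.
 */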
/*
 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
 * depend on the chip going through a power-on reset - a driver may be loaded
 * and unloaded many times.
 *
 * Do not write any CSR values to the chip in this routine - there may be
 * a reset following the (possible) FLR in this routine.
 */
static int init_chip(struct hfi1_devdata *dd)
{
	int i;
	int ret = 0;

	/*
	 * Put the HFI CSRs in a known state.
	 * Combine this with a DC reset.
	 *
	 * Stop the device from doing anything while we do a
	 * reset. We know there are no other active users of
	 * the device since we are now in charge. Turn off
	 * all outbound and inbound traffic and make sure
	 * the device does not generate any interrupts.
	 */
	/* disable send contexts and SDMA engines */
	write_csr(dd, SEND_CTRL, 0);
	for (i = 0; i < chip_send_contexts(dd); i++)
		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
	for (i = 0; i < chip_sdma_engines(dd); i++)
		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
	/* disable port (turn off RXE inbound traffic) and contexts */
	write_csr(dd, RCV_CTRL, 0);
	for (i = 0; i < chip_rcv_contexts(dd); i++)
		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
	/* mask all interrupt sources */
	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
		write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
	/*
	 * DC Reset: do a full DC reset before the register clear.
	 * A recommended length of time to hold is one CSR read,
	 * so reread the CceDcCtrl. Then, hold the DC in reset
	 * across the clear.
	 */
	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
	(void)read_csr(dd, CCE_DC_CTRL);

	if (use_flr) {
		/*
		 * A FLR will reset the SPC core and part of the PCIe.
		 * The parts that need to be restored have already been
		 * saved.
		 */
		dd_dev_info(dd, "Resetting CSRs with FLR\n");

		/* do the FLR, the DC reset will remain */
		pcie_flr(dd->pcidev);

		/* restore command and BARs */
		ret = restore_pci_variables(dd);
		if (ret) {
			dd_dev_err(dd, "%s: Could not restore PCI variables\n",
				   __func__);
			return ret;
		}

		/* A0 hardware requires a second FLR */
		if (is_ax(dd)) {
			dd_dev_info(dd, "Resetting CSRs with FLR\n");
			pcie_flr(dd->pcidev);
			ret = restore_pci_variables(dd);
			if (ret) {
				dd_dev_err(dd, "%s: Could not restore PCI variables\n",
					   __func__);
				return ret;
			}
		}
	} else {
		dd_dev_info(dd, "Resetting CSRs with writes\n");
		reset_cce_csrs(dd);
		reset_txe_csrs(dd);
		reset_rxe_csrs(dd);
		reset_misc_csrs(dd);
	}

	/* clear the DC reset */
	write_csr(dd, CCE_DC_CTRL, 0);
	/* Set the LED off */
	setextled(dd, 0);

	/*
	 * Clear the QSFP reset.
	 * An FLR enforces a 0 on all out pins. The driver does not touch
	 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
	 * anything plugged constantly in reset, if it pays attention
	 * to RESET_N.
	 * Prime examples of this are optical cables. Set all pins high.
	 * I2CCLK and I2CDAT will change per direction, and INT_N and
	 * MODPRS_N are input only and their value is ignored.
	 */
	write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
	write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
	init_chip_resources(dd);
	return ret;
}
static void init_early_variables(struct hfi1_devdata *dd)
{
	int i;

	/* assign link credit variables */
	dd->vau = CM_VAU;
	dd->link_credits = CM_GLOBAL_CREDITS;
	if (is_ax(dd))
		dd->link_credits--;
	dd->vcu = cu_to_vcu(hfi1_cu);
	/* enough room for 8 MAD packets plus header - 17K */
	dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
	if (dd->vl15_init > dd->link_credits)
		dd->vl15_init = dd->link_credits;

	write_uninitialized_csrs_and_memories(dd);

	if (HFI1_CAP_IS_KSET(PKEY_CHECK))
		for (i = 0; i < dd->num_pports; i++) {
			struct hfi1_pportdata *ppd = &dd->pport[i];

			set_partition_keys(ppd);
		}

	init_sc2vl_tables(dd);
}
static void init_kdeth_qp(struct hfi1_devdata *dd)
{
	write_csr(dd, SEND_BTH_QP,
		  (RVT_KDETH_QP_PREFIX & SEND_BTH_QP_KDETH_QP_MASK) <<
		  SEND_BTH_QP_KDETH_QP_SHIFT);

	write_csr(dd, RCV_BTH_QP,
		  (RVT_KDETH_QP_PREFIX & RCV_BTH_QP_KDETH_QP_MASK) <<
		  RCV_BTH_QP_KDETH_QP_SHIFT);
}
/**
 * hfi1_get_qp_map - get qp map
 * @dd: device data
 * @idx: index to read
 */
u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
{
	u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8);

	reg >>= (idx % 8) * 8;
	return reg;
}
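/*
 * Indexing example for hfi1_get_qp_map(): idx = 10 reads the qword at
 * RCV_QP_MAP_TABLE + 8 (entries 8..15) and shifts right by 16 bits, so
 * table entry 10 ends up in the low byte of the returned value.
 */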
/**
 * init_qpmap_table - init qp map
 * @dd: device data
 * @first_ctxt: first context
 * @last_ctxt: last context
 *
 * This routine sets the qpn mapping table that
 * is indexed by qpn[8:1].
 *
 * The routine will round robin the 256 settings
 * from first_ctxt to last_ctxt.
 *
 * The first/last looks ahead to having specialized
 * receive contexts for mgmt and bypass. Normal
 * verbs traffic will be assumed to be on a range
 * of receive contexts.
 */
static void init_qpmap_table(struct hfi1_devdata *dd,
			     u32 first_ctxt,
			     u32 last_ctxt)
{
	u64 reg = 0;
	u64 regno = RCV_QP_MAP_TABLE;
	int i;
	u64 ctxt = first_ctxt;

	for (i = 0; i < 256; i++) {
		reg |= ctxt << (8 * (i % 8));
		ctxt++;
		if (ctxt > last_ctxt)
			ctxt = first_ctxt;
		/* every 8th entry, flush the packed register */
		if (i % 8 == 7) {
			write_csr(dd, regno, reg);
			reg = 0;
			regno += 8;
		}
	}

	add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
		    | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
}
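/*
 * Example: init_qpmap_table(dd, 1, 3) fills all 256 entries with the
 * repeating pattern 1, 2, 3, 1, 2, 3, ... Because the table is indexed
 * by qpn[8:1], each consecutive QPN pair (2n, 2n+1) lands on the same
 * receive context.
 */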
struct rsm_map_table {
	u64 map[NUM_MAP_REGS];
	unsigned int used;
};

struct rsm_rule_data {
	u8 offset;
	u8 pkt_type;
	u32 field1_off;
	u32 field2_off;
	u32 index1_off;
	u32 index1_width;
	u32 index2_off;
	u32 index2_width;
	u32 mask1;
	u32 value1;
	u32 mask2;
	u32 value2;
};
/*
 * Return an initialized RMT map table for users to fill in. OK if it
 * returns NULL, indicating no table.
 */
static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
{
	struct rsm_map_table *rmt;
	u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */

	rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
	if (rmt) {
		memset(rmt->map, rxcontext, sizeof(rmt->map));
		rmt->used = 0;
	}

	return rmt;
}
/*
 * Write the final RMT map table to the chip and free the table. OK if
 * table is NULL.
 */
static void complete_rsm_map_table(struct hfi1_devdata *dd,
				   struct rsm_map_table *rmt)
{
	int i;

	if (rmt) {
		/* write table to chip */
		for (i = 0; i < NUM_MAP_REGS; i++)
			write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);

		/* enable RSM */
		add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
	}
}
/* Is a receive side mapping rule */
static bool has_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
{
	return read_csr(dd, RCV_RSM_CFG + (8 * rule_index)) != 0;
}
/*
 * Add a receive side mapping rule.
 */
static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
			 struct rsm_rule_data *rrd)
{
	write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
		  (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
		  1ull << rule_index | /* enable bit */
		  (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
		  (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
		  (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
		  (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
		  (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
		  (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
		  (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
		  (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
		  (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
		  (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
		  (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
}
/*
 * Clear a receive side mapping rule.
 */
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
{
	write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
}
/* return the number of RSM map table entries that will be used for QOS */
static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp,
			   unsigned int *np)
{
	int i;
	unsigned int m, n;
	uint max_by_vl = 0;

	/* is QOS active at all? */
	if (n_krcv_queues < MIN_KERNEL_KCTXTS ||
	    num_vls == 1 ||
	    krcvqsset <= 1)
		goto no_qos;

	/* determine bits for qpn */
	for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
		if (krcvqs[i] > max_by_vl)
			max_by_vl = krcvqs[i];
	if (max_by_vl > 32)
		goto no_qos;
	m = ilog2(__roundup_pow_of_two(max_by_vl));

	/* determine bits for vl */
	n = ilog2(__roundup_pow_of_two(num_vls));

	/* reject if too much is used */
	if ((m + n) > 7)
		goto no_qos;

	if (mp)
		*mp = m;
	if (np)
		*np = n;

	return 1 << (m + n);

no_qos:
	if (mp)
		*mp = 0;
	if (np)
		*np = 0;
	return 0;
}
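/*
 * Worked example for qos_rmt_entries(): with num_vls = 2 and
 * krcvqs = { 4, 4 } (krcvqsset = 2), max_by_vl = 4 gives m = 2 and
 * n = ilog2(2) = 1; m + n = 3 <= 7, so QOS is viable and the verbs rule
 * will consume 1 << 3 = 8 RMT entries.
 */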
/**
 * init_qos - init RX qos
 * @dd: device data
 * @rmt: RSM map table
 *
 * This routine initializes Rule 0 and the RSM map table to implement
 * quality of service (qos).
 *
 * If all of the limit tests succeed, qos is applied based on the array
 * interpretation of krcvqs where entry 0 is VL0.
 *
 * The number of vl bits (n) and the number of qpn bits (m) are computed to
 * feed both the RSM map table and the single rule.
 */
static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
{
	struct rsm_rule_data rrd;
	unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
	unsigned int rmt_entries;
	u64 reg;

	if (!rmt)
		goto bail;
	rmt_entries = qos_rmt_entries(dd->n_krcv_queues - 1, &m, &n);
	if (rmt_entries == 0)
		goto bail;
	qpns_per_vl = 1 << m;

	/* enough room in the map table? */
	rmt_entries = 1 << (m + n);
	if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
		goto bail;

	/* add qos entries to the RSM map table */
	for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
		unsigned tctxt;

		for (qpn = 0, tctxt = ctxt;
		     krcvqs[i] && qpn < qpns_per_vl; qpn++) {
			unsigned idx, regoff, regidx;

			/* generate the index the hardware will produce */
			idx = rmt->used + ((qpn << n) ^ i);
			regoff = (idx % 8) * 8;
			regidx = idx / 8;
			/* replace default with context number */
			reg = rmt->map[regidx];
			reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
				 << regoff);
			reg |= (u64)(tctxt++) << regoff;
			rmt->map[regidx] = reg;
			if (tctxt == ctxt + krcvqs[i])
				tctxt = ctxt;
		}
		ctxt += krcvqs[i];
	}

	rrd.offset = rmt->used;
	rrd.pkt_type = 2;
	rrd.field1_off = LRH_BTH_MATCH_OFFSET;
	rrd.field2_off = LRH_SC_MATCH_OFFSET;
	rrd.index1_off = LRH_SC_SELECT_OFFSET;
	rrd.index1_width = n;
	rrd.index2_off = QPN_SELECT_OFFSET;
	rrd.index2_width = m + n;
	rrd.mask1 = LRH_BTH_MASK;
	rrd.value1 = LRH_BTH_VALUE;
	rrd.mask2 = LRH_SC_MASK;
	rrd.value2 = LRH_SC_VALUE;

	/* add rule 0 */
	add_rsm_rule(dd, RSM_INS_VERBS, &rrd);

	/* mark RSM map entries as used */
	rmt->used += rmt_entries;
	/* map everything else to the mcast/err/vl15 context */
	init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
	dd->qos_shift = n + 1;
	return;
bail:
	dd->qos_shift = 1;
	init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
}
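/*
 * Map-layout example for init_qos(): with m = 2 (qpns_per_vl = 4) and
 * n = 1, idx = used + ((qpn << n) ^ i) places VL 0 in table slots
 * used+0,2,4,6 and VL 1 in slots used+1,3,5,7. Each slot holds the next
 * kernel context for that VL, wrapping after krcvqs[i] contexts.
 */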
static void init_fecn_handling(struct hfi1_devdata *dd,
			       struct rsm_map_table *rmt)
{
	struct rsm_rule_data rrd;
	u64 reg;
	int i, idx, regoff, regidx, start;
	u8 offset;
	u32 total_cnt;

	if (HFI1_CAP_IS_KSET(TID_RDMA))
		/* Exclude context 0 */
		start = 1;
	else
		start = dd->first_dyn_alloc_ctxt;

	total_cnt = dd->num_rcv_contexts - start;

	/* there needs to be enough room in the map table */
	if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
		dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n");
		return;
	}

	/*
	 * RSM will extract the destination context as an index into the
	 * map table. The destination contexts are a sequential block
	 * in the range start...num_rcv_contexts-1 (inclusive).
	 * Map entries are accessed as offset + extracted value. Adjust
	 * the added offset so this sequence can be placed anywhere in
	 * the table - as long as the entries themselves do not wrap.
	 * There are only enough bits in offset for the table size, so
	 * start with that to allow for a "negative" offset.
	 */
	offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start);
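	/*
	 * Example of the "negative" offset, assuming NUM_MAP_ENTRIES = 256
	 * (32 qword registers x 8 byte-wide entries): if rmt->used = 20 and
	 * start = 8, then offset = (u8)(256 + 20 - 8) = 12, and a packet
	 * destined for context 8 indexes map entry 12 + 8 = 20, exactly the
	 * first entry claimed by the loop below.
	 */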
	for (i = start, idx = rmt->used; i < dd->num_rcv_contexts;
	     i++, idx++) {
		/* replace with identity mapping */
		regoff = (idx % 8) * 8;
		regidx = idx / 8;
		reg = rmt->map[regidx];
		reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
		reg |= (u64)i << regoff;
		rmt->map[regidx] = reg;
	}

	/*
	 * For RSM intercept of Expected FECN packets:
	 * o packet type 0 - expected
	 * o match on F (bit 95), using select/match 1, and
	 * o match on SH (bit 133), using select/match 2.
	 *
	 * Use index 1 to extract the 8-bit receive context from DestQP
	 * (start at bit 64). Use that as the RSM map table index.
	 */
	rrd.offset = offset;
	rrd.pkt_type = 0;
	rrd.field1_off = 95;
	rrd.field2_off = 133;
	rrd.index1_off = 64;
	rrd.index1_width = 8;
	rrd.index2_off = 0;
	rrd.index2_width = 0;
	rrd.mask1 = 1;
	rrd.value1 = 1;
	rrd.mask2 = 1;
	rrd.value2 = 1;

	/* add rule 1 */
	add_rsm_rule(dd, RSM_INS_FECN, &rrd);

	rmt->used += total_cnt;
}
static inline bool hfi1_is_rmt_full(int start, int spare)
{
	return (start + spare) > NUM_MAP_ENTRIES;
}
static bool hfi1_netdev_update_rmt(struct hfi1_devdata *dd)
{
	u8 i, j;
	u8 ctx_id = 0;
	u64 reg;
	u32 regoff;
	int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
	int ctxt_count = hfi1_netdev_ctxt_count(dd);

	/* We already have contexts mapped in RMT */
	if (has_rsm_rule(dd, RSM_INS_VNIC) || has_rsm_rule(dd, RSM_INS_AIP)) {
		dd_dev_info(dd, "Contexts are already mapped in RMT\n");
		return true;
	}

	if (hfi1_is_rmt_full(rmt_start, NUM_NETDEV_MAP_ENTRIES)) {
		dd_dev_err(dd, "Not enough RMT entries used = %d\n",
			   rmt_start);
		return false;
	}

	dev_dbg(&(dd)->pcidev->dev, "RMT start = %d, end %d\n",
		rmt_start,
		rmt_start + NUM_NETDEV_MAP_ENTRIES);

	/* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
	regoff = RCV_RSM_MAP_TABLE + (rmt_start / 8) * 8;
	reg = read_csr(dd, regoff);
	for (i = 0; i < NUM_NETDEV_MAP_ENTRIES; i++) {
		/* Update map register with netdev context */
		j = (rmt_start + i) % 8;
		reg &= ~(0xffllu << (j * 8));
		reg |= (u64)hfi1_netdev_get_ctxt(dd, ctx_id++)->ctxt << (j * 8);
		/* Wrap up netdev ctx index */
		ctx_id %= ctxt_count;
		/* Write back map register */
		if (j == 7 || ((i + 1) == NUM_NETDEV_MAP_ENTRIES)) {
			dev_dbg(&(dd)->pcidev->dev,
				"RMT[%d] =0x%llx\n",
				regoff - RCV_RSM_MAP_TABLE, reg);

			write_csr(dd, regoff, reg);
			regoff += 8;
			if (i < (NUM_NETDEV_MAP_ENTRIES - 1))
				reg = read_csr(dd, regoff);
		}
	}

	return true;
}
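/*
 * Byte-lane example for the RMT update loop above: each map register packs
 * 8 one-byte entries, so entry (rmt_start + i) lives in byte
 * j = (rmt_start + i) % 8 of the qword at
 * RCV_RSM_MAP_TABLE + ((rmt_start + i) / 8) * 8. With rmt_start = 20, the
 * first netdev context is written into byte 4 of map register 2.
 */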
static void hfi1_enable_rsm_rule(struct hfi1_devdata *dd,
				 int rule, struct rsm_rule_data *rrd)
{
	if (!hfi1_netdev_update_rmt(dd)) {
		dd_dev_err(dd, "Failed to update RMT for RSM%d rule\n", rule);
		return;
	}

	add_rsm_rule(dd, rule, rrd);
	add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}
void hfi1_init_aip_rsm(struct hfi1_devdata *dd)
{
	/*
	 * go through with the initialisation only if this rule actually
	 * doesn't exist yet
	 */
	if (atomic_fetch_inc(&dd->ipoib_rsm_usr_num) == 0) {
		int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
		struct rsm_rule_data rrd = {
			.offset = rmt_start,
			.pkt_type = IB_PACKET_TYPE,
			.field1_off = LRH_BTH_MATCH_OFFSET,
			.mask1 = LRH_BTH_MASK,
			.value1 = LRH_BTH_VALUE,
			.field2_off = BTH_DESTQP_MATCH_OFFSET,
			.mask2 = BTH_DESTQP_MASK,
			.value2 = BTH_DESTQP_VALUE,
			.index1_off = DETH_AIP_SQPN_SELECT_OFFSET +
				      ilog2(NUM_NETDEV_MAP_ENTRIES),
			.index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES),
			.index2_off = DETH_AIP_SQPN_SELECT_OFFSET,
			.index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES)
		};

		hfi1_enable_rsm_rule(dd, RSM_INS_AIP, &rrd);
	}
}
/* Initialize RSM for VNIC */
void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
{
	int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
	struct rsm_rule_data rrd = {
		/* Add rule for vnic */
		.offset = rmt_start,
		.pkt_type = 4,
		/* Match 16B packets */
		.field1_off = L2_TYPE_MATCH_OFFSET,
		.mask1 = L2_TYPE_MASK,
		.value1 = L2_16B_VALUE,
		/* Match ETH L4 packets */
		.field2_off = L4_TYPE_MATCH_OFFSET,
		.mask2 = L4_16B_TYPE_MASK,
		.value2 = L4_16B_ETH_VALUE,
		/* Calc context from veswid and entropy */
		.index1_off = L4_16B_HDR_VESWID_OFFSET,
		.index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES),
		.index2_off = L2_16B_ENTROPY_OFFSET,
		.index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES)
	};

	hfi1_enable_rsm_rule(dd, RSM_INS_VNIC, &rrd);
}
void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
{
	clear_rsm_rule(dd, RSM_INS_VNIC);
}

void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd)
{
	/* only actually clear the rule if it's the last user asking to do so */
	if (atomic_fetch_add_unless(&dd->ipoib_rsm_usr_num, -1, 0) == 1)
		clear_rsm_rule(dd, RSM_INS_AIP);
}
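/*
 * The AIP init/deinit pair above reference-counts users of the rule:
 * hfi1_init_aip_rsm() installs it only when atomic_fetch_inc() observes 0
 * (first user), and hfi1_deinit_aip_rsm() clears it only when
 * atomic_fetch_add_unless() returns 1 (last user), which also refuses to
 * decrement the counter below zero.
 */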
static int init_rxe(struct hfi1_devdata *dd)
{
	struct rsm_map_table *rmt;
	u64 val;

	/* enable all receive errors */
	write_csr(dd, RCV_ERR_MASK, ~0ull);

	rmt = alloc_rsm_map_table(dd);
	if (!rmt)
		return -ENOMEM;

	/* set up QOS, including the QPN map table */
	init_qos(dd, rmt);
	init_fecn_handling(dd, rmt);
	complete_rsm_map_table(dd, rmt);
	/* record number of used rsm map entries for netdev */
	hfi1_netdev_set_free_rmt_idx(dd, rmt->used);
	kfree(rmt);

	/*
	 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
	 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
	 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
	 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
	 * Max_Payload_Size set to its minimum of 128.
	 *
	 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
	 * (64 bytes). Max_Payload_Size is possibly modified upward in
	 * tune_pcie_caps() which is called after this routine.
	 */

	/* Have 16 bytes (4DW) of bypass header available in header queue */
	val = read_csr(dd, RCV_BYPASS);
	val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
	val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
		RCV_BYPASS_HDR_SIZE_SHIFT);
	write_csr(dd, RCV_BYPASS, val);

	return 0;
}
static void init_other(struct hfi1_devdata *dd)
{
	/* enable all CCE errors */
	write_csr(dd, CCE_ERR_MASK, ~0ull);
	/* enable *some* Misc errors */
	write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
	/* enable all DC errors, except LCB */
	write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
	write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
}
/*
 * Fill out the given AU table using the given CU. A CU is defined in terms
 * of AUs. The table is an encoding: given the index, how many AUs does that
 * index represent?
 *
 * NOTE: Assumes that the register layout is the same for the
 * local and remote tables.
 */
static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
			       u32 csr0to3, u32 csr4to7)
{
	write_csr(dd, csr0to3,
		  0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
		  1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
		  2ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
		  4ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
	write_csr(dd, csr4to7,
		  8ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
		  16ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
		  32ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
		  64ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
}
static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
			   SEND_CM_LOCAL_AU_TABLE4_TO7);
}

void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
			   SEND_CM_REMOTE_AU_TABLE4_TO7);
}
static void init_txe(struct hfi1_devdata *dd)
{
	int i;

	/* enable all PIO, SDMA, general, and Egress errors */
	write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
	write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
	write_csr(dd, SEND_ERR_MASK, ~0ull);
	write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);

	/* enable all per-context and per-SDMA engine errors */
	for (i = 0; i < chip_send_contexts(dd); i++)
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
	for (i = 0; i < chip_sdma_engines(dd); i++)
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);

	/* set the local CU to AU mapping */
	assign_local_cm_au_table(dd, dd->vcu);

	/*
	 * Set reasonable default for Credit Return Timer
	 * Don't set on Simulator - causes it to choke.
	 */
	if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
		write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
}
int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
		       u16 jkey)
{
	u8 hw_ctxt;
	u64 reg;

	if (!rcd || !rcd->sc)
		return -EINVAL;

	hw_ctxt = rcd->sc->hw_context;
	reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
	      ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
	       SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
	/* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
	if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
		reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
	/*
	 * Enable send-side J_KEY integrity check, unless this is A0 h/w
	 */
	if (!is_ax(dd)) {
		reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
		reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
		write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
	}

	/* Enable J_KEY check on receive context. */
	reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
	      ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
	       RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
	write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);

	return 0;
}
int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u8 hw_ctxt;
	u64 reg;

	if (!rcd || !rcd->sc)
		return -EINVAL;

	hw_ctxt = rcd->sc->hw_context;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
	/*
	 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
	 * This check would not have been enabled for A0 h/w, see
	 * set_ctxt_jkey().
	 */
	if (!is_ax(dd)) {
		reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
		reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
		write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
	}

	/* Turn off the J_KEY on the receive side */
	write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);

	return 0;
}
int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
		       u16 pkey)
{
	u8 hw_ctxt;
	u64 reg;

	if (!rcd || !rcd->sc)
		return -EINVAL;

	hw_ctxt = rcd->sc->hw_context;
	reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
		SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
	reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
	reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);

	return 0;
}
int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
{
	u8 hw_ctxt;
	u64 reg;

	if (!ctxt || !ctxt->sc)
		return -EINVAL;

	hw_ctxt = ctxt->sc->hw_context;
	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
	reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);

	return 0;
}
/*
 * Start doing the clean up of the chip. Our clean up happens in multiple
 * stages and this is just the first.
 */
void hfi1_start_cleanup(struct hfi1_devdata *dd)
{
	aspm_exit(dd);
	free_cntrs(dd);
	free_rcverr(dd);
	finish_chip_resources(dd);
}
#define HFI_BASE_GUID(dev) \
	((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
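/*
 * The two HFIs on one ASIC differ only in the single GUID bit selected by
 * GUID_HFI_INDEX_SHIFT, so HFI_BASE_GUID() yields a value both devices
 * share; init_asic_data() below compares base GUIDs to find its peer.
 */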
/*
 * Information can be shared between the two HFIs on the same ASIC
 * in the same OS. This function finds the peer device and sets
 * up a shared structure.
 */
static int init_asic_data(struct hfi1_devdata *dd)
{
	unsigned long index;
	struct hfi1_devdata *peer;
	struct hfi1_asic_data *asic_data;
	int ret = 0;

	/* pre-allocate the asic structure in case we are the first device */
	asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
	if (!asic_data)
		return -ENOMEM;

	xa_lock_irq(&hfi1_dev_table);
	/* Find our peer device */
	xa_for_each(&hfi1_dev_table, index, peer) {
		if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) &&
		    dd->unit != peer->unit)
			break;
	}

	if (peer) {
		/* use already allocated structure */
		dd->asic_data = peer->asic_data;
		kfree(asic_data);
	} else {
		dd->asic_data = asic_data;
		mutex_init(&dd->asic_data->asic_resource_mutex);
	}
	dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
	xa_unlock_irq(&hfi1_dev_table);

	/* first one through - set up i2c devices */
	if (!peer)
		ret = set_up_i2c(dd, dd->asic_data);

	return ret;
}
/*
 * Set dd->boardname. Use a generic name if a name is not returned from
 * EFI variable space.
 *
 * Return 0 on success, -ENOMEM if space could not be allocated.
 */
static int obtain_boardname(struct hfi1_devdata *dd)
{
	/* generic board description */
	const char generic[] =
		"Cornelis Omni-Path Host Fabric Interface Adapter 100 Series";
	unsigned long size;
	int ret;

	ret = read_hfi1_efi_var(dd, "description", &size,
				(void **)&dd->boardname);
	if (ret) {
		dd_dev_info(dd, "Board description not found\n");
		/* use generic description */
		dd->boardname = kstrdup(generic, GFP_KERNEL);
		if (!dd->boardname)
			return -ENOMEM;
	}

	return 0;
}
/*
 * Check the interrupt registers to make sure that they are mapped correctly.
 * It is intended to help user identify any mismapping by VMM when the driver
 * is running in a VM. This function should only be called before interrupt
 * is set up properly.
 *
 * Return 0 on success, -EINVAL on failure.
 */
static int check_int_registers(struct hfi1_devdata *dd)
{
	u64 reg;
	u64 all_bits = ~(u64)0;
	u64 mask;

	/* Clear CceIntMask[0] to avoid raising any interrupts */
	mask = read_csr(dd, CCE_INT_MASK);
	write_csr(dd, CCE_INT_MASK, 0ull);
	reg = read_csr(dd, CCE_INT_MASK);
	if (reg)
		goto err_exit;

	/* Clear all interrupt status bits */
	write_csr(dd, CCE_INT_CLEAR, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg)
		goto err_exit;

	/* Set all interrupt status bits */
	write_csr(dd, CCE_INT_FORCE, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg != all_bits)
		goto err_exit;

	/* Restore the interrupt mask */
	write_csr(dd, CCE_INT_CLEAR, all_bits);
	write_csr(dd, CCE_INT_MASK, mask);

	return 0;
err_exit:
	write_csr(dd, CCE_INT_MASK, mask);
	dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
	return -EINVAL;
}
/**
 * hfi1_init_dd() - Initialize most of the dd structure.
 * @dd: the dd device
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
int hfi1_init_dd(struct hfi1_devdata *dd)
{
	struct pci_dev *pdev = dd->pcidev;
	struct hfi1_pportdata *ppd;
	u64 reg;
	int i, ret;
	static const char * const inames[] = { /* implementation names */
		"RTL silicon",
		"RTL VCS simulation",
		"RTL FPGA emulation",
		"Functional simulator"
	};
	struct pci_dev *parent = pdev->bus->self;
	u32 sdma_engines = chip_sdma_engines(dd);
	ppd = dd->pport;
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		int vl;

		/* init common fields */
		hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
		/* DC supports 4 link widths */
		ppd->link_width_supported =
			OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
			OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_supported =
			ppd->link_width_supported;
		/* start out enabling only 4X */
		ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_enabled =
			ppd->link_width_downgrade_supported;
		/* link width active is 0 when link is down */
		/* link width downgrade active is 0 when link is down */

		if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
		    num_vls > HFI1_MAX_VLS_SUPPORTED) {
			dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
				   num_vls, HFI1_MAX_VLS_SUPPORTED);
			num_vls = HFI1_MAX_VLS_SUPPORTED;
		}
		ppd->vls_supported = num_vls;
		ppd->vls_operational = ppd->vls_supported;
		/* Set the default MTU. */
		for (vl = 0; vl < num_vls; vl++)
			dd->vld[vl].mtu = hfi1_max_mtu;
		dd->vld[15].mtu = MAX_MAD_PACKET;
		/*
		 * Set the initial values to reasonable default, will be set
		 * for real when link is up.
		 */
		ppd->overrun_threshold = 0x4;
		ppd->phy_error_threshold = 0xf;
		ppd->port_crc_mode_enabled = link_crc_mask;
		/* initialize supported LTP CRC mode */
		ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
		/* initialize enabled LTP CRC mode */
		ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
		/* start in offline */
		ppd->host_link_state = HLS_DN_OFFLINE;
		init_vl_arb_caches(ppd);
	}
	/*
	 * Do remaining PCIe setup and save PCIe values in dd.
	 * Any error printing is already done by the init code.
	 * On return, we have the chip mapped.
	 */
	ret = hfi1_pcie_ddinit(dd, pdev);
	if (ret < 0)
		goto bail_free;

	/* Save PCI space registers to rewrite after device reset */
	ret = save_pci_variables(dd);
	if (ret)
		goto bail_cleanup;

	dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MAJOR_MASK;
	dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MINOR_MASK;

	/*
	 * Check interrupt registers mapping if the driver has no access to
	 * the upstream component. In this case, it is likely that the driver
	 * is running in a VM.
	 */
	if (!parent) {
		ret = check_int_registers(dd);
		if (ret)
			goto bail_cleanup;
	}

	/*
	 * obtain the hardware ID - NOT related to unit, which is a
	 * software enumeration
	 */
	reg = read_csr(dd, CCE_REVISION2);
	dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
			& CCE_REVISION2_HFI_ID_MASK;
	/* the variable size will remove unwanted bits */
	dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
	dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
	dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
		    dd->icode < ARRAY_SIZE(inames) ?
		    inames[dd->icode] : "unknown", (int)dd->irev);

	/* speeds the hardware can support */
	dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
	/* speeds allowed to run at */
	dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
	/* give a reasonable active value, will be set on link up */
	dd->pport->link_speed_active = OPA_LINK_SPEED_25G;

	/* fix up link widths for emulation _p */
	ppd = dd->pport;
	if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
		ppd->link_width_supported =
			ppd->link_width_enabled =
			ppd->link_width_downgrade_supported =
			ppd->link_width_downgrade_enabled =
				OPA_LINK_WIDTH_1X;
	}
	/* ensure num_vls isn't larger than number of sdma engines */
	if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
		dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
			   num_vls, sdma_engines);
		num_vls = sdma_engines;
		ppd->vls_supported = sdma_engines;
		ppd->vls_operational = ppd->vls_supported;
	}
	/*
	 * Convert the ns parameter to the 64 * cclocks used in the CSR.
	 * Limit the max if larger than the field holds. If timeout is
	 * non-zero, then the calculated field will be at least 1.
	 *
	 * Must be after icode is set up - the cclock rate depends
	 * on knowing the hardware being used.
	 */
	dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
	if (dd->rcv_intr_timeout_csr >
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
		dd->rcv_intr_timeout_csr =
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
	else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
		dd->rcv_intr_timeout_csr = 1;

	/* needs to be done before we look for the peer device */
	read_guid(dd);

	/* set up shared ASIC data with peer device */
	ret = init_asic_data(dd);
	if (ret)
		goto bail_cleanup;

	/* obtain chip sizes, reset chip CSRs */
	ret = init_chip(dd);
	if (ret)
		goto bail_cleanup;

	/* read in the PCIe link speed information */
	ret = pcie_speeds(dd);
	if (ret)
		goto bail_cleanup;

	/* call before get_platform_config(), after init_chip_resources() */
	ret = eprom_init(dd);
	if (ret)
		goto bail_free_rcverr;
	/* Needs to be called before hfi1_firmware_init */
	get_platform_config(dd);

	/* read in firmware */
	ret = hfi1_firmware_init(dd);
	if (ret)
		goto bail_cleanup;

	/*
	 * In general, the PCIe Gen3 transition must occur after the
	 * chip has been idled (so it won't initiate any PCIe transactions
	 * e.g. an interrupt) and before the driver changes any registers
	 * (the transition will reset the registers).
	 *
	 * In particular, place this call after:
	 * - init_chip()          - the chip will not initiate any PCIe
	 *                          transactions
	 * - pcie_speeds()        - reads the current link speed
	 * - hfi1_firmware_init() - the needed firmware is ready to be
	 *                          downloaded
	 */
	ret = do_pcie_gen3_transition(dd);
	if (ret)
		goto bail_cleanup;

	/*
	 * This should probably occur in hfi1_pcie_init(), but historically
	 * occurs after the do_pcie_gen3_transition() code.
	 */
	tune_pcie_caps(dd);

	/* start setting dd values and adjusting CSRs */
	init_early_variables(dd);

	parse_platform_config(dd);

	ret = obtain_boardname(dd);
	if (ret)
		goto bail_cleanup;

	snprintf(dd->boardversion, BOARD_VERS_MAX,
		 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
		 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
		 (u32)dd->majrev,
		 (u32)dd->minrev,
		 (dd->revision >> CCE_REVISION_SW_SHIFT)
		    & CCE_REVISION_SW_MASK);

	/* alloc VNIC/AIP rx data */
	ret = hfi1_alloc_rx(dd);
	if (ret)
		goto bail_cleanup;
	ret = set_up_context_variables(dd);
	if (ret)
		goto bail_cleanup;

	/* set initial RXE CSRs */
	ret = init_rxe(dd);
	if (ret)
		goto bail_cleanup;

	/* set initial TXE CSRs */
	init_txe(dd);
	/* set initial non-RXE, non-TXE CSRs */
	init_other(dd);
	/* set up KDETH QP prefix in both RX and TX CSRs */
	init_kdeth_qp(dd);

	ret = hfi1_dev_affinity_init(dd);
	if (ret)
		goto bail_cleanup;

	/* send contexts must be set up before receive contexts */
	ret = init_send_contexts(dd);
	if (ret)
		goto bail_cleanup;

	ret = hfi1_create_kctxts(dd);
	if (ret)
		goto bail_cleanup;

	/*
	 * Initialize aspm, to be done after gen3 transition and setting up
	 * contexts and before enabling interrupts
	 */
	aspm_init(dd);

	ret = init_pervl_scs(dd);
	if (ret)
		goto bail_cleanup;

	for (i = 0; i < dd->num_pports; ++i) {
		ret = sdma_init(dd, i);
		if (ret)
			goto bail_cleanup;
	}

	/* use contexts created by hfi1_create_kctxts */
	ret = set_up_interrupts(dd);
	if (ret)
		goto bail_cleanup;

	ret = hfi1_comp_vectors_set_up(dd);
	if (ret)
		goto bail_clear_intr;

	/* set up LCB access - must be after set_up_interrupts() */
	init_lcb_access(dd);

	/*
	 * Serial number is created from the base guid:
	 * [27:24] = base guid [38:35]
	 * [23: 0] = base guid [23: 0]
	 */
	snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
		 (dd->base_guid & 0xFFFFFF) |
		 ((dd->base_guid >> 11) & 0xF000000));
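	/*
	 * Bit check for the serial number above: base_guid bits [38:35]
	 * shifted right by 11 land at bits [27:24], and the 0xF000000 mask
	 * keeps exactly those; OR-ing in base_guid[23:0] gives the layout
	 * described in the comment.
	 */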
	dd->oui1 = dd->base_guid >> 56 & 0xFF;
	dd->oui2 = dd->base_guid >> 48 & 0xFF;
	dd->oui3 = dd->base_guid >> 40 & 0xFF;

	ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
	if (ret)
		goto bail_clear_intr;

	thermal_init(dd);

	ret = init_cntrs(dd);
	if (ret)
		goto bail_clear_intr;

	ret = init_rcverr(dd);
	if (ret)
		goto bail_free_cntrs;

	init_completion(&dd->user_comp);

	/* The user refcount starts with one to indicate an active device */
	refcount_set(&dd->user_refcount, 1);

	goto bail;

bail_free_rcverr:
	free_rcverr(dd);
bail_free_cntrs:
	free_cntrs(dd);
bail_clear_intr:
	hfi1_comp_vectors_clean_up(dd);
	msix_clean_up_interrupts(dd);
bail_cleanup:
	hfi1_pcie_ddcleanup(dd);
bail_free:
	hfi1_free_devdata(dd);
bail:
	return ret;
}
static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
			u32 dw_len)
{
	u32 delta_cycles;
	u32 current_egress_rate = ppd->current_egress_rate;
	/* rates here are in units of 10^6 bits/sec */

	if (desired_egress_rate == -1)
		return 0; /* shouldn't happen */

	if (desired_egress_rate >= current_egress_rate)
		return 0; /* we can't help go faster, only slower */

	delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
		       egress_cycles(dw_len * 4, current_egress_rate);

	return (u16)delta_cycles;
}
/**
 * create_pbc - build a pbc for transmission
 * @ppd: info of physical Hfi port
 * @flags: special case flags or-ed in built pbc
 * @srate_mbs: static rate
 * @vl: vl
 * @dw_len: dword length (header words + data words + pbc words)
 *
 * Create a PBC with the given flags, rate, VL, and length.
 *
 * NOTE: The PBC created will not insert any HCRC - all callers but one are
 * for verbs, which does not use this PSM feature. The lone other caller
 * is for the diagnostic interface which calls this if the user does not
 * supply their own PBC.
 */
u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
	       u32 dw_len)
{
	u64 pbc, delay = 0;

	if (unlikely(srate_mbs))
		delay = delay_cycles(ppd, srate_mbs, dw_len);

	pbc = flags
		| (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
		| ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
		| (vl & PBC_VL_MASK) << PBC_VL_SHIFT
		| (dw_len & PBC_LENGTH_DWS_MASK)
			<< PBC_LENGTH_DWS_SHIFT;

	return pbc;
}
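/*
 * Illustrative sketch only, not part of the driver: how a caller might
 * build a PBC for a 9-dword packet on VL 0 with static rate control
 * disabled (srate_mbs = 0). The argument values are made up for the
 * example.
 */
static inline u64 example_verbs_pbc(struct hfi1_pportdata *ppd)
{
	/* flags = 0, srate_mbs = 0 (no delay cycles), vl = 0, dw_len = 9 */
	return create_pbc(ppd, 0, 0, 0, 9);
}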
#define SBUS_THERMAL    0x4f
#define SBUS_THERM_MONITOR_MODE 0x1

#define THERM_FAILURE(dev, ret, reason) \
	dd_dev_err((dd),						\
		   "Thermal sensor initialization failed: %s (%d)\n",	\
		   (reason), (ret))

/*
 * Initialize the thermal sensor.
 *
 * After initialization, enable polling of thermal sensor through
 * SBus interface. In order for this to work, the SBus Master
 * firmware has to be loaded due to the fact that the HW polling
 * logic uses SBus interrupts, which are not supported with
 * default firmware. Otherwise, no data will be returned through
 * the ASIC_STS_THERM CSR.
 */
static int thermal_init(struct hfi1_devdata *dd)
{
	int ret = 0;

	if (dd->icode != ICODE_RTL_SILICON ||
	    check_chip_resource(dd, CR_THERM_INIT, NULL))
		return ret;

	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
	if (ret) {
		THERM_FAILURE(dd, ret, "Acquire SBus");
		return ret;
	}

	dd_dev_info(dd, "Initializing thermal sensor\n");
	/* Disable polling of thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
	msleep(100);
	/* Thermal Sensor Initialization */
	/* Step 1: Reset the Thermal SBus Receiver */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				RESET_SBUS_RECEIVER, 0);
	if (ret) {
		THERM_FAILURE(dd, ret, "Bus Reset");
		goto done;
	}
	/* Step 2: Set Reset bit in Thermal block */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x1);
	if (ret) {
		THERM_FAILURE(dd, ret, "Therm Block Reset");
		goto done;
	}
	/* Step 3: Write clock divider value (100MHz -> 2MHz) */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
				WRITE_SBUS_RECEIVER, 0x32);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Clock Div");
		goto done;
	}
	/* Step 4: Select temperature mode */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
				WRITE_SBUS_RECEIVER,
				SBUS_THERM_MONITOR_MODE);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Mode Sel");
		goto done;
	}
	/* Step 5: De-assert block reset and start conversion */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x2);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Reset Deassert");
		goto done;
	}
	/* Step 5.1: Wait for first conversion (21.5ms per spec) */
	msleep(22);

	/* Enable polling of thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);

	/* Set initialized flag */
	ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
	if (ret)
		THERM_FAILURE(dd, ret, "Unable to set thermal init flag");

done:
	release_chip_resource(dd, CR_SBUS);
	return ret;
}
static void handle_temp_err(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = &dd->pport[0];
	/*
	 * Thermal Critical Interrupt
	 * Put the device into forced freeze mode, take link down to
	 * offline, and put DC into reset.
	 */
	dd_dev_emerg(dd,
		     "Critical temperature reached! Forcing device into freeze mode!\n");
	dd->flags |= HFI1_FORCED_FREEZE;
	start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
	/*
	 * Shut DC down as much and as quickly as possible.
	 *
	 * Step 1: Take the link down to OFFLINE. This will cause the
	 *         8051 to put the Serdes in reset. However, we don't want
	 *         to go through the entire link state machine since we
	 *         want to shutdown ASAP. Furthermore, this is not a
	 *         graceful shutdown but rather an attempt to save the chip.
	 *         Code below is almost the same as quiet_serdes() but
	 *         avoids all the extra work and the sleeps.
	 */
	ppd->driver_link_ready = 0;
	ppd->link_enabled = 0;
	set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
				PLS_OFFLINE);
	/*
	 * Step 2: Shutdown LCB and 8051
	 *         After shutdown, do not restore DC_CFG_RESET value.
	 */