2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/init.h>
36 #include <linux/delay.h>
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
56 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
60 u32 val = t4_read_reg(adapter, reg);
/* Normalize the masked field to 0/1 and compare with the wanted polarity. */
62 if (!!(val & mask) == polarity) {
/* NOTE(review): the success path (store *valp, return 0), the retry/
 * delay loop, and the -EAGAIN exit are elided in this view. */
/*
 * Convenience wrapper around t4_wait_op_done_val() for callers that do not
 * need the final register value (presumably passes NULL for @valp — the
 * tail of the call is elided in this view).
 */
74 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75 int polarity, int attempts, int delay)
77 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
88 * Sets a register field specified by the supplied mask to the
91 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
/* Read-modify-write: clear the masked field, then OR in the new value. */
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
101 * t4_read_indirect - read indirectly addressed registers
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
109 * Reads registers that are accessed indirectly through an address/data
112 static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx)
/* Select the indirect register, then fetch its value through data_reg.
 * NOTE(review): the surrounding loop over @nregs is elided in this view. */
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
124 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
126 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
/* Copy @nflit 64-bit flits out of the mailbox, converting each to BE. */
129 for ( ; nflit; nflit--, mbox_addr += 8)
130 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
134 * Handle a FW assertion reported in a mailbox.
136 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
138 struct fw_debug_cmd asrt;
/* Pull the FW_DEBUG_CMD payload out of the mailbox, then log the
 * assertion's file/line and the two diagnostic values it carries. */
140 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
141 dev_alert(adap->pdev_dev,
142 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
143 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
144 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
/*
 * Dump the full 64-byte contents of a mailbox (eight 64-bit words) to the
 * kernel log, used when a FW command fails or times out.
 */
147 static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
149 dev_err(adap->pdev_dev,
150 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
151 (unsigned long long)t4_read_reg64(adap, data_reg),
152 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
153 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
154 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
155 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
156 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
157 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
158 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
162 * t4_wr_mbox_meat - send a command to FW through the given mailbox
164 * @mbox: index of the mailbox to use
165 * @cmd: the command to write
166 * @size: command length in bytes
167 * @rpl: where to optionally store the reply
168 * @sleep_ok: if true we may sleep while awaiting command completion
170 * Sends the given command to FW through the selected mailbox and waits
171 * for the FW to execute the command. If @rpl is not %NULL it is used to
172 * store the FW's reply to the command. The command and its optional
173 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
174 * to respond. @sleep_ok determines whether we may sleep while awaiting
175 * the response. If sleeping is allowed we use progressive backoff
178 * The return value is 0 on success or a negative errno on failure. A
179 * failure can happen either because we are not able to execute the
180 * command or FW executes it but signals an error. In the latter case
181 * the return value is the error code indicated by FW (negated).
183 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
184 void *rpl, bool sleep_ok)
/* Backoff schedule in ms for the polling loop below. */
186 static const int delay[] = {
187 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
192 int i, ms, delay_idx;
193 const __be64 *p = cmd;
194 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
195 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
197 if ((size & 15) || size > MBOX_LEN)
201 * If the device is off-line, as in EEH, commands will time out.
202 * Fail them early so we don't waste time waiting.
204 if (adap->pdev->error_state != pci_channel_io_normal)
/* Acquire mailbox ownership; retry a few times while it is unowned. */
207 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
208 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
209 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
211 if (v != MBOX_OWNER_DRV)
212 return v ? -EBUSY : -ETIMEDOUT;
/* Copy the command into the mailbox data registers, 8 bytes at a time. */
214 for (i = 0; i < size; i += 8)
215 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
/* Hand the mailbox to FW and kick off execution. */
217 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
218 t4_read_reg(adap, ctl_reg); /* flush write */
/* Poll for completion with progressive backoff, up to the FW timeout. */
223 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
225 ms = delay[delay_idx]; /* last element may repeat */
226 if (delay_idx < ARRAY_SIZE(delay) - 1)
232 v = t4_read_reg(adap, ctl_reg);
233 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
/* Ownership came back without a valid message: spurious; clear
 * and keep polling (continue path elided in this view). */
234 if (!(v & MBMSGVALID)) {
235 t4_write_reg(adap, ctl_reg, 0);
239 res = t4_read_reg64(adap, data_reg);
/* A FW_DEBUG_CMD in the reply slot means FW asserted; log it and
 * report the command as failed with EIO. */
240 if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
241 fw_asrt(adap, data_reg);
242 res = FW_CMD_RETVAL(EIO);
244 get_mbox_rpl(adap, rpl, size / 8, data_reg);
246 if (FW_CMD_RETVAL_GET((int)res))
247 dump_mbox(adap, mbox, data_reg);
248 t4_write_reg(adap, ctl_reg, 0);
/* Negate the FW status so 0 = success, <0 = FW-reported error. */
249 return -FW_CMD_RETVAL_GET((int)res);
/* Timed out: dump the mailbox for diagnosis (return path elided). */
253 dump_mbox(adap, mbox, data_reg);
254 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
255 *(const u8 *)cmd, mbox);
260 * t4_mc_read - read from MC through backdoor accesses
262 * @addr: address of first byte requested
263 * @data: 64 bytes of data containing the requested address
264 * @ecc: where to store the corresponding 64-bit ECC word
266 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
267 * that covers the requested address @addr. If @parity is not %NULL it
268 * is assigned the 64-bit ECC word for the read data.
270 int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
/* Bail if a BIST operation is already in flight (return elided). */
274 if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
/* Program a 64-byte-aligned backdoor read and start the BIST engine. */
276 t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
277 t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
278 t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
279 t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
281 i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
285 #define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
/* Copy out the 16 data words (note: descending register order). */
287 for (i = 15; i >= 0; i--)
288 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
290 *ecc = t4_read_reg64(adap, MC_DATA(16));
296 * t4_edc_read - read from EDC through backdoor accesses
298 * @idx: which EDC to access
299 * @addr: address of first byte requested
300 * @data: 64 bytes of data containing the requested address
301 * @ecc: where to store the corresponding 64-bit ECC word
303 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
304 * that covers the requested address @addr. If @parity is not %NULL it
305 * is assigned the 64-bit ECC word for the read data.
307 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
/* @idx offsets the register addresses to select EDC0 vs EDC1
 * (presumably scaled earlier — the setup lines are elided here). */
312 if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
314 t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
315 t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
316 t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
317 t4_write_reg(adap, EDC_BIST_CMD + idx,
318 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
319 i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
323 #define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
/* Copy out the 16 data words (descending register order), then the ECC. */
325 for (i = 15; i >= 0; i--)
326 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
328 *ecc = t4_read_reg64(adap, EDC_DATA(16));
333 #define EEPROM_STAT_ADDR 0x7bfc
338 * t4_seeprom_wp - enable/disable EEPROM write protection
339 * @adapter: the adapter
340 * @enable: whether to enable or disable write protection
342 * Enables or disables write protection on the serial EEPROM.
344 int t4_seeprom_wp(struct adapter *adapter, bool enable)
/* 0xc sets the EEPROM status-register block-protect bits; 0 clears them. */
346 unsigned int v = enable ? 0xc : 0;
347 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
/* pci_write_vpd() returns bytes written on success; map that to 0. */
348 return ret < 0 ? ret : 0;
352 * get_vpd_params - read VPD parameters from VPD EEPROM
353 * @adapter: adapter to read
354 * @p: where to store the parameters
356 * Reads card parameters stored in VPD EEPROM.
358 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
362 u8 vpd[VPD_LEN], csum;
363 unsigned int vpdr_len, kw_offset, id_len;
365 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
/* The VPD must begin with an ID-string large resource tag. */
369 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
370 dev_err(adapter->pdev_dev, "missing VPD ID string\n")
374 id_len = pci_vpd_lrdt_size(vpd);
/* Locate the read-only (VPD-R) section that holds the keywords. */
378 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
380 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
384 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
385 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
/* Sanity-check the section length against our read buffer. */
386 if (vpdr_len + kw_offset > VPD_LEN) {
387 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
/* Look up keyword @name in the VPD-R section and leave @var pointing at
 * its data (past the info-field header), failing loudly if absent. */
391 #define FIND_VPD_KW(var, name) do { \
392 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
394 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
397 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
/* Verify the VPD checksum: bytes up through the RV keyword must sum to
 * a value consistent with the stored checksum (summation body elided). */
400 FIND_VPD_KW(i, "RV");
401 for (csum = 0; i >= 0; i--)
405 dev_err(adapter->pdev_dev,
406 "corrupted VPD EEPROM, actual csum %u\n", csum);
/* Extract the EC (engineering change) and SN (serial number) fields. */
410 FIND_VPD_KW(ec, "EC");
411 FIND_VPD_KW(sn, "SN");
414 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
416 memcpy(p->ec, vpd + ec, EC_LEN);
418 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
419 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
424 /* serial flash and firmware constants */
426 SF_ATTEMPTS = 10, /* max retries for SF operations */
428 /* flash command opcodes */
/* Standard SPI-flash opcodes issued through sf1_write() below. */
429 SF_PROG_PAGE = 2, /* program page */
430 SF_WR_DISABLE = 4, /* disable writes */
431 SF_RD_STATUS = 5, /* read status register */
432 SF_WR_ENABLE = 6, /* enable writes */
433 SF_RD_DATA_FAST = 0xb, /* read flash */
434 SF_RD_ID = 0x9f, /* read ID */
435 SF_ERASE_SECTOR = 0xd8, /* erase sector */
437 FW_MAX_SIZE = 512 * 1024, /* upper bound on a FW image, bytes */
441 * sf1_read - read data from the serial flash
442 * @adapter: the adapter
443 * @byte_cnt: number of bytes to read
444 * @cont: whether another operation will be chained
445 * @lock: whether to lock SF for PL access only
446 * @valp: where to store the read data
448 * Reads up to 4 bytes of data from the serial flash. The location of
449 * the read needs to be specified prior to calling this by issuing the
450 * appropriate commands to the serial flash.
452 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
457 if (!byte_cnt || byte_cnt > 4)
/* Fail fast if a prior SF operation is still busy (return elided). */
459 if (t4_read_reg(adapter, SF_OP) & BUSY)
/* Normalize the truthy flags to their register bit encodings. */
461 cont = cont ? SF_CONT : 0;
462 lock = lock ? SF_LOCK : 0;
463 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
464 ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
466 *valp = t4_read_reg(adapter, SF_DATA);
471 * sf1_write - write data to the serial flash
472 * @adapter: the adapter
473 * @byte_cnt: number of bytes to write
474 * @cont: whether another operation will be chained
475 * @lock: whether to lock SF for PL access only
476 * @val: value to write
478 * Writes up to 4 bytes of data to the serial flash. The location of
479 * the write needs to be specified prior to calling this by issuing the
480 * appropriate commands to the serial flash.
482 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
485 if (!byte_cnt || byte_cnt > 4)
487 if (t4_read_reg(adapter, SF_OP) & BUSY)
/* Normalize the truthy flags to their register bit encodings. */
489 cont = cont ? SF_CONT : 0;
490 lock = lock ? SF_LOCK : 0;
/* Stage the data, then kick off the write and wait for BUSY to clear. */
491 t4_write_reg(adapter, SF_DATA, val);
492 t4_write_reg(adapter, SF_OP, lock |
493 cont | BYTECNT(byte_cnt - 1) | OP_WR);
494 return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
498 * flash_wait_op - wait for a flash operation to complete
499 * @adapter: the adapter
500 * @attempts: max number of polls of the status register
501 * @delay: delay between polls in ms
503 * Wait for a flash operation to complete by polling the status register.
505 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
/* Issue RD_STATUS then read one status byte; abort on any SF error.
 * NOTE(review): the poll loop, busy-bit test and delay are elided here. */
511 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
512 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
524 * t4_read_flash - read words from serial flash
525 * @adapter: the adapter
526 * @addr: the start address for the read
527 * @nwords: how many 32-bit words to read
528 * @data: where to store the read data
529 * @byte_oriented: whether to store data as bytes or as words
531 * Read the specified number of 32-bit words from the serial flash.
532 * If @byte_oriented is set the read data is stored as a byte array
533 * (i.e., big-endian), otherwise as 32-bit words in the platform's
536 static int t4_read_flash(struct adapter *adapter, unsigned int addr,
537 unsigned int nwords, u32 *data, int byte_oriented)
/* Bounds-check the read and require 4-byte alignment. */
541 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
/* Build the FAST_READ command: opcode in the low byte, address
 * byte-swapped so it goes out MSB-first on the wire. */
544 addr = swab32(addr) | SF_RD_DATA_FAST;
/* Send command+address, then clock out the mandatory dummy byte. */
546 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
547 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
550 for ( ; nwords; nwords--, data++) {
/* Keep SF_CONT set until the final word, which also drops the lock. */
551 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
553 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
/* byte-oriented: store big-endian so the buffer reads as raw bytes. */
557 *data = htonl(*data);
563 * t4_write_flash - write up to a page of data to the serial flash
564 * @adapter: the adapter
565 * @addr: the start address to write
566 * @n: length of data to write in bytes
567 * @data: the data to write
569 * Writes up to a page of data (256 bytes) to the serial flash starting
570 * at the given address. All the data must be written to the same page.
572 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
573 unsigned int n, const u8 *data)
577 unsigned int i, c, left, val, offset = addr & 0xff;
/* Reject writes past the flash or ones that would cross a page. */
579 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
/* PROG_PAGE command with the byte-swapped (MSB-first) address. */
582 val = swab32(addr) | SF_PROG_PAGE;
584 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
585 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
/* Stream the payload out in chunks of up to 4 bytes, big-endian packed. */
588 for (left = n; left; left -= c) {
590 for (val = 0, i = 0; i < c; ++i)
591 val = (val << 8) + *data++;
593 ret = sf1_write(adapter, c, c != left, 1, val);
597 ret = flash_wait_op(adapter, 8, 1);
601 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
603 /* Read the page to verify the write succeeded */
604 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
/* data has advanced past the payload; rewind by n to compare. */
608 if (memcmp(data - n, (u8 *)buf + offset, n)) {
609 dev_err(adapter->pdev_dev,
610 "failed to correctly write the flash page at %#x\n",
/* Error path: drop the SF lock before returning (label elided). */
617 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
622 * get_fw_version - read the firmware version
623 * @adapter: the adapter
624 * @vers: where to place the version
626 * Reads the FW version from flash.
628 static int get_fw_version(struct adapter *adapter, u32 *vers)
/* The version lives at a fixed offset inside the fw_hdr at the start
 * of the FW flash region. */
630 return t4_read_flash(adapter, adapter->params.sf_fw_start +
631 offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
635 * get_tp_version - read the TP microcode version
636 * @adapter: the adapter
637 * @vers: where to place the version
639 * Reads the TP microcode version from flash.
641 static int get_tp_version(struct adapter *adapter, u32 *vers)
/* Same fw_hdr as get_fw_version(), different field offset. */
643 return t4_read_flash(adapter, adapter->params.sf_fw_start +
644 offsetof(struct fw_hdr, tp_microcode_ver),
649 * t4_check_fw_version - check if the FW is compatible with this driver
650 * @adapter: the adapter
652 * Checks if an adapter's FW is compatible with the driver. Returns 0
653 * if there's exact match, a negative error if the version could not be
654 * read or there's a major version mismatch, and a positive value if the
655 * expected major version is found but there's a minor version mismatch.
657 int t4_check_fw_version(struct adapter *adapter)
660 int ret, major, minor, micro;
/* Cache the FW, TP, and interface API versions from flash. */
662 ret = get_fw_version(adapter, &adapter->params.fw_vers);
664 ret = get_tp_version(adapter, &adapter->params.tp_vers);
666 ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
667 offsetof(struct fw_hdr, intfver_nic),
/* Decompose the FW version into major/minor/micro components. */
672 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
673 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
674 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
675 memcpy(adapter->params.api_vers, api_vers,
676 sizeof(adapter->params.api_vers));
678 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
679 dev_err(adapter->pdev_dev,
680 "card FW has major version %u, driver wants %u\n",
681 major, FW_VERSION_MAJOR);
685 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
686 return 0; /* perfect match */
688 /* Minor/micro version mismatch. Report it but often it's OK. */
693 * t4_flash_erase_sectors - erase a range of flash sectors
694 * @adapter: the adapter
695 * @start: the first sector to erase
696 * @end: the last sector to erase
698 * Erases the sectors in the given inclusive range.
700 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
704 while (start <= end) {
/* Per sector: write-enable, issue ERASE with the sector number in
 * the address bits, then wait out the (slow) erase. */
705 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
706 (ret = sf1_write(adapter, 4, 0, 1,
707 SF_ERASE_SECTOR | (start << 8))) != 0 ||
708 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
709 dev_err(adapter->pdev_dev,
710 "erase of flash sector %d failed, error %d\n",
716 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
721 * t4_load_fw - download firmware
723 * @fw_data: the firmware image to write
726 * Write the supplied firmware image to the card's serial flash.
728 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
733 u8 first_page[SF_PAGE_SIZE];
734 const u32 *p = (const u32 *)fw_data;
735 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
736 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
737 unsigned int fw_img_start = adap->params.sf_fw_start;
738 unsigned int fw_start_sec = fw_img_start / sf_sec_size;
/* Validate the image: non-empty, 512-byte granular, header-consistent
 * size, and within the flash budget. */
741 dev_err(adap->pdev_dev, "FW image has no data\n");
745 dev_err(adap->pdev_dev,
746 "FW image size not multiple of 512 bytes\n");
749 if (ntohs(hdr->len512) * 512 != size) {
750 dev_err(adap->pdev_dev,
751 "FW image size differs from size in FW header\n");
754 if (size > FW_MAX_SIZE) {
755 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
/* Whole-image checksum: the 32-bit words must sum to 0xffffffff. */
760 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
763 if (csum != 0xffffffff) {
764 dev_err(adap->pdev_dev,
765 "corrupted firmware image, checksum %#x\n", csum);
769 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
770 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
775 * We write the correct version at the end so the driver can see a bad
776 * version if the FW write fails. Start by writing a copy of the
777 * first page with a bad version.
779 memcpy(first_page, fw_data, SF_PAGE_SIZE);
780 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
781 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
/* Write the remaining pages (size is known 512-granular, so this
 * loop terminates exactly at zero). */
786 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
787 addr += SF_PAGE_SIZE;
788 fw_data += SF_PAGE_SIZE;
789 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
/* Finally patch in the real version, marking the image valid. */
794 ret = t4_write_flash(adap,
795 fw_img_start + offsetof(struct fw_hdr, fw_ver),
796 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
799 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
/* Link capabilities the driver is willing to advertise. */
804 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
805 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
808 * t4_link_start - apply link configuration to MAC/PHY
809 * @phy: the PHY to setup
810 * @mac: the MAC to setup
811 * @lc: the requested link configuration
813 * Set up a port's MAC and PHY according to a desired link configuration.
814 * - If the PHY can auto-negotiate first decide what to advertise, then
815 * enable/disable auto-negotiation as desired, and reset.
816 * - If the PHY does not auto-negotiate just reset it.
817 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
818 * otherwise do it later based on the outcome of auto-negotiation.
820 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
821 struct link_config *lc)
823 struct fw_port_cmd c;
824 unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
/* Translate the requested pause settings into FW capability bits. */
827 if (lc->requested_fc & PAUSE_RX)
828 fc |= FW_PORT_CAP_FC_RX;
829 if (lc->requested_fc & PAUSE_TX)
830 fc |= FW_PORT_CAP_FC_TX;
/* Build an L1_CFG PORT command for this port. */
832 memset(&c, 0, sizeof(c));
833 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
834 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
835 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
/* No autoneg support: program the full supported capability set. */
838 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
839 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
840 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
/* Autoneg capable but disabled: force the requested speed. */
841 } else if (lc->autoneg == AUTONEG_DISABLE) {
842 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
843 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
/* Autoneg enabled: advertise and let negotiation settle the rest. */
845 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
847 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
851 * t4_restart_aneg - restart autonegotiation
853 * @mbox: mbox to use for the FW command
856 * Restarts autonegotiation for the selected port.
858 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
860 struct fw_port_cmd c;
/* L1_CFG with only the ANEG capability set requests a renegotiation. */
862 memset(&c, 0, sizeof(c));
863 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
864 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
865 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
867 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
868 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
871 typedef void (*int_handler_t)(struct adapter *adap);
/* One row of an interrupt-decode table consumed by
 * t4_handle_intr_status(); a mask of 0 terminates the table. */
874 unsigned int mask; /* bits to check in interrupt status */
875 const char *msg; /* message to print or NULL */
876 short stat_idx; /* stat counter to increment or -1 */
877 unsigned short fatal; /* whether the condition reported is fatal */
878 int_handler_t int_handler; /* platform-specific int handler */
882 * t4_handle_intr_status - table driven interrupt handler
883 * @adapter: the adapter that generated the interrupt
884 * @reg: the interrupt status register to process
885 * @acts: table of interrupt actions
887 * A table driven interrupt handler that applies a set of masks to an
888 * interrupt status word and performs the corresponding actions if the
889 * interrupts described by the mask have occurred. The actions include
890 * optionally emitting a warning or alert message. The table is terminated
891 * by an entry specifying mask 0. Returns the number of fatal interrupt
894 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
895 const struct intr_info *acts)
898 unsigned int mask = 0;
899 unsigned int status = t4_read_reg(adapter, reg);
901 for ( ; acts->mask; ++acts) {
/* Skip table rows whose interrupt bits are not pending. */
902 if (!(status & acts->mask))
/* Fatal conditions are logged at alert level; non-fatal ones at
 * warn level, rate-limited to avoid log flooding. */
906 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
907 status & acts->mask);
908 } else if (acts->msg && printk_ratelimit())
909 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
910 status & acts->mask);
911 if (acts->int_handler)
912 acts->int_handler(adapter);
916 if (status) /* clear processed interrupts */
917 t4_write_reg(adapter, reg, status);
922 * Interrupt handler for the PCIE module.
924 static void pcie_intr_handler(struct adapter *adapter)
/* Decode tables for the three PCIE cause registers processed below;
 * nearly every condition here is a parity error and treated as fatal. */
926 static const struct intr_info sysbus_intr_info[] = {
927 { RNPP, "RXNP array parity error", -1, 1 },
928 { RPCP, "RXPC array parity error", -1, 1 },
929 { RCIP, "RXCIF array parity error", -1, 1 },
930 { RCCP, "Rx completions control array parity error", -1, 1 },
931 { RFTP, "RXFT array parity error", -1, 1 },
934 static const struct intr_info pcie_port_intr_info[] = {
935 { TPCP, "TXPC array parity error", -1, 1 },
936 { TNPP, "TXNP array parity error", -1, 1 },
937 { TFTP, "TXFT array parity error", -1, 1 },
938 { TCAP, "TXCA array parity error", -1, 1 },
939 { TCIP, "TXCIF array parity error", -1, 1 },
940 { RCAP, "RXCA array parity error", -1, 1 },
941 { OTDD, "outbound request TLP discarded", -1, 1 },
942 { RDPE, "Rx data parity error", -1, 1 },
943 { TDUE, "Tx uncorrectable data error", -1, 1 },
946 static const struct intr_info pcie_intr_info[] = {
947 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
948 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
949 { MSIDATAPERR, "MSI data parity error", -1, 1 },
950 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
951 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
952 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
953 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
954 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
955 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
956 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
957 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
958 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
959 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
960 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
961 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
962 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
963 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
964 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
965 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
966 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
967 { FIDPERR, "PCI FID parity error", -1, 1 },
968 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
969 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
970 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
971 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
972 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
973 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
974 { PCIESINT, "PCI core secondary fault", -1, 1 },
975 { PCIEPINT, "PCI core primary fault", -1, 1 },
976 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
/* Accumulate the fatal counts from all three cause registers. */
982 fat = t4_handle_intr_status(adapter,
983 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
985 t4_handle_intr_status(adapter,
986 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
987 pcie_port_intr_info) +
988 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
990 t4_fatal_err(adapter);
994 * TP interrupt handler.
996 static void tp_intr_handler(struct adapter *adapter)
998 static const struct intr_info tp_intr_info[] = {
999 { 0x3fffffff, "TP parity error", -1, 1 },
1000 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
/* All TP conditions are fatal; escalate if any fired. */
1004 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1005 t4_fatal_err(adapter);
1009 * SGE interrupt handler.
1011 static void sge_intr_handler(struct adapter *adapter)
1015 static const struct intr_info sge_intr_info[] = {
1016 { ERR_CPL_EXCEED_IQE_SIZE,
1017 "SGE received CPL exceeding IQE size", -1, 1 },
1018 { ERR_INVALID_CIDX_INC,
1019 "SGE GTS CIDX increment too large", -1, 0 },
1020 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
/* Doorbell FIFO conditions: no message, handled via callbacks. */
1021 { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
1022 { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
1023 { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
1024 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1025 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1026 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1028 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1030 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1032 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1034 { ERR_ING_CTXT_PRIO,
1035 "SGE too many priority ingress contexts", -1, 0 },
1036 { ERR_EGR_CTXT_PRIO,
1037 "SGE too many priority egress contexts", -1, 0 },
1038 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1039 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
/* CAUSE1/CAUSE2 hold a 64-bit parity-error bitmap; any set bit is
 * logged and cleared by writing the value back. */
1043 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
1044 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
1046 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1047 (unsigned long long)v);
1048 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1049 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
/* Escalate if the table found fatal conditions or parity bits fired. */
1052 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1054 t4_fatal_err(adapter);
1058 * CIM interrupt handler.
1060 static void cim_intr_handler(struct adapter *adapter)
/* Host-facing CIM conditions (mailboxes, queues) — all fatal. */
1062 static const struct intr_info cim_intr_info[] = {
1063 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1064 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1065 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1066 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1067 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1068 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1069 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
/* uP-access violations (illegal reads/writes to protected spaces). */
1072 static const struct intr_info cim_upintr_info[] = {
1073 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1074 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1075 { ILLWRINT, "CIM illegal write", -1, 1 },
1076 { ILLRDINT, "CIM illegal read", -1, 1 },
1077 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1078 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1079 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1080 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1081 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1082 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1083 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1084 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1085 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1086 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1087 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1088 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1089 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1090 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1091 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1092 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1093 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1094 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1095 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1096 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1097 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1098 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1099 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1100 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
/* Sum fatal counts from both cause registers, then escalate. */
1106 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1108 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1111 t4_fatal_err(adapter);
1115 * ULP RX interrupt handler.
/*
 * ULP RX interrupt handler: both documented cause groups (context and parity
 * errors) are fatal. NOTE(review): listing truncated; braces/terminators
 * missing — verify against the original file.
 */
1117 static void ulprx_intr_handler(struct adapter *adapter)
1119 static const struct intr_info ulprx_intr_info[] = {
1120 { 0x1800000, "ULPRX context error", -1, 1 },
1121 { 0x7fffff, "ULPRX parity error", -1, 1 },
1125 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1126 t4_fatal_err(adapter);
1130 * ULP TX interrupt handler.
/*
 * ULP TX interrupt handler: per-channel PBL bound errors and a wide parity
 * mask, all fatal. NOTE(review): listing truncated; verify missing lines.
 */
1132 static void ulptx_intr_handler(struct adapter *adapter)
1134 static const struct intr_info ulptx_intr_info[] = {
1135 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1137 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1139 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1141 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1143 { 0xfffffff, "ULPTX parity error", -1, 1 },
1147 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1148 t4_fatal_err(adapter);
1152 * PM TX interrupt handler.
1154 static void pmtx_intr_handler(struct adapter *adapter)
1156 static const struct intr_info pmtx_intr_info[] = {
1157 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1158 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1159 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1160 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1161 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1162 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1163 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1164 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1165 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1169 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1170 t4_fatal_err(adapter);
1174 * PM RX interrupt handler.
/*
 * PM RX interrupt handler: mirrors pmtx_intr_handler for the receive path;
 * all listed causes are fatal.
 */
1176 static void pmrx_intr_handler(struct adapter *adapter)
1178 static const struct intr_info pmrx_intr_info[] = {
1179 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1180 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1181 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1182 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1183 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1184 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1188 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1189 t4_fatal_err(adapter);
1193 * CPL switch interrupt handler.
/*
 * CPL switch interrupt handler: framing/overflow/parity errors between the
 * CPL switch and CIM/TP/SGE; any flagged cause is fatal.
 */
1195 static void cplsw_intr_handler(struct adapter *adapter)
1197 static const struct intr_info cplsw_intr_info[] = {
1198 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1199 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1200 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1201 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1202 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1203 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1207 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1208 t4_fatal_err(adapter);
1212 * LE interrupt handler.
/*
 * LE (Lookup Engine) interrupt handler. LIP miss/error are non-fatal (last
 * field 0); parity and command errors are fatal. NOTE(review): the action
 * taken when t4_handle_intr_status() is nonzero is on a missing line —
 * presumably t4_fatal_err(adap); verify against the original file.
 */
1214 static void le_intr_handler(struct adapter *adap)
1216 static const struct intr_info le_intr_info[] = {
1217 { LIPMISS, "LE LIP miss", -1, 0 },
1218 { LIP0, "LE 0 LIP error", -1, 0 },
1219 { PARITYERR, "LE parity error", -1, 1 },
1220 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1221 { REQQPARERR, "LE request queue parity error", -1, 1 },
1225 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1230 * MPS interrupt handler.
/*
 * MPS interrupt handler: walks seven per-sub-block cause registers (Rx/Tx,
 * TRC, statistics SRAM/Tx/Rx FIFOs, classifier), accumulating a fatal count,
 * then clears the top-level MPS_INT_CAUSE bits and flushes with a read-back.
 * NOTE(review): listing truncated; braces, the 'fat' declaration and some
 * continuation lines are missing — verify against the original file.
 */
1232 static void mps_intr_handler(struct adapter *adapter)
1234 static const struct intr_info mps_rx_intr_info[] = {
1235 { 0xffffff, "MPS Rx parity error", -1, 1 },
1238 static const struct intr_info mps_tx_intr_info[] = {
1239 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1240 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1241 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1242 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1243 { BUBBLE, "MPS Tx underflow", -1, 1 },
1244 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1245 { FRMERR, "MPS Tx framing error", -1, 1 },
1248 static const struct intr_info mps_trc_intr_info[] = {
1249 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1250 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1251 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1254 static const struct intr_info mps_stat_sram_intr_info[] = {
1255 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1258 static const struct intr_info mps_stat_tx_intr_info[] = {
1259 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1262 static const struct intr_info mps_stat_rx_intr_info[] = {
1263 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1266 static const struct intr_info mps_cls_intr_info[] = {
1267 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1268 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1269 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
/* Sum of nonzero results across all sub-blocks decides fatality. */
1275 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1277 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1279 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1280 mps_trc_intr_info) +
1281 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1282 mps_stat_sram_intr_info) +
1283 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1284 mps_stat_tx_intr_info) +
1285 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1286 mps_stat_rx_intr_info) +
1287 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
/* Ack the top-level cause bits, then read back to flush the posted write. */
1290 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1291 RXINT | TXINT | STATINT);
1292 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1294 t4_fatal_err(adapter);
1297 #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1300 * EDC/MC interrupt handler.
/*
 * EDC/MC memory-controller interrupt handler. @idx selects EDC0/EDC1/MC and
 * thereby the cause and ECC-status register addresses. Correctable ECC
 * errors are counted and rate-limited-logged; parity and uncorrectable ECC
 * errors are fatal. The write to 'addr' acks the handled cause bits.
 */
1302 static void mem_intr_handler(struct adapter *adapter, int idx)
1304 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
1306 unsigned int addr, cnt_addr, v;
/* EDC registers are per-instance via EDC_REG(); MC has fixed addresses. */
1308 if (idx <= MEM_EDC1) {
1309 addr = EDC_REG(EDC_INT_CAUSE, idx);
1310 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1312 addr = MC_INT_CAUSE;
1313 cnt_addr = MC_ECC_STATUS;
1316 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1317 if (v & PERR_INT_CAUSE)
1318 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1320 if (v & ECC_CE_INT_CAUSE) {
1321 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
/* Writing the full mask clears the correctable-error counter. */
1323 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1324 if (printk_ratelimit())
1325 dev_warn(adapter->pdev_dev,
1326 "%u %s correctable ECC data error%s\n",
1327 cnt, name[idx], cnt > 1 ? "s" : "");
1329 if (v & ECC_UE_INT_CAUSE)
1330 dev_alert(adapter->pdev_dev,
1331 "%s uncorrectable ECC data error\n", name[idx]);
1333 t4_write_reg(adapter, addr, v);
1334 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1335 t4_fatal_err(adapter);
1339 * MA interrupt handler.
/*
 * MA (Memory Arbiter) interrupt handler: reports parity errors and address
 * wrap-arounds (decoding client number and wrapped address from
 * MA_INT_WRAP_STATUS), then acks the cause register. NOTE(review): whether
 * this escalates to t4_fatal_err() is on a missing line — verify.
 */
1341 static void ma_intr_handler(struct adapter *adap)
1343 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1345 if (status & MEM_PERR_INT_CAUSE)
1346 dev_alert(adap->pdev_dev,
1347 "MA parity error, parity status %#x\n",
1348 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1349 if (status & MEM_WRAP_INT_CAUSE) {
1350 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1351 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1352 "client %u to address %#x\n",
1353 MEM_WRAP_CLIENT_NUM_GET(v),
1354 MEM_WRAP_ADDRESS_GET(v) << 4);
1356 t4_write_reg(adap, MA_INT_CAUSE, status);
1361 * SMB interrupt handler.
/*
 * SMBus interrupt handler: FIFO parity errors. NOTE(review): the action on a
 * nonzero status is on a missing line — verify against the original file.
 */
1363 static void smb_intr_handler(struct adapter *adap)
1365 static const struct intr_info smb_intr_info[] = {
1366 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1367 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1368 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1372 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1377 * NC-SI interrupt handler.
/*
 * NC-SI interrupt handler: parity errors on the CIM/MPS interfaces and the
 * Tx/Rx FIFOs. NOTE(review): the action on a nonzero status is on a missing
 * line — verify against the original file.
 */
1379 static void ncsi_intr_handler(struct adapter *adap)
1381 static const struct intr_info ncsi_intr_info[] = {
1382 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1383 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1384 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1385 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1389 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1394 * XGMAC interrupt handler.
/*
 * XGMAC interrupt handler for one port: logs Tx/Rx FIFO parity errors and
 * acks only the bits handled (v is masked before the write-back).
 */
1396 static void xgmac_intr_handler(struct adapter *adap, int port)
1398 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1400 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1404 if (v & TXFIFO_PRTY_ERR)
1405 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1407 if (v & RXFIFO_PRTY_ERR)
1408 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1410 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1415 * PL interrupt handler.
/*
 * PL (top-level) interrupt handler: fatal parity and VFID_MAP parity errors.
 * NOTE(review): the action on a nonzero status is on a missing line — verify.
 */
1417 static void pl_intr_handler(struct adapter *adap)
1419 static const struct intr_info pl_intr_info[] = {
1420 { FATALPERR, "T4 fatal parity error", -1, 1 },
1421 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1425 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1429 #define PF_INTR_MASK (PFSW)
1430 #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1431 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1432 CPL_SWITCH | SGE | ULP_TX)
1435 * t4_slow_intr_handler - control path interrupt handler
1436 * @adapter: the adapter
1438 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1439 * The designation 'slow' is because it involves register reads, while
1440 * data interrupts typically don't involve any MMIOs.
/*
 * Top-level slow-path dispatcher: reads PL_INT_CAUSE once and fans out to
 * the per-module handlers for every set bit in GLBL_INTR_MASK, then acks
 * only the global bits and flushes with a read-back. NOTE(review): most
 * 'if (cause & MODULE)' guard lines are missing from this listing — each
 * bare handler call below is presumably guarded; verify before editing.
 */
1442 int t4_slow_intr_handler(struct adapter *adapter)
1444 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
/* Nothing for us to do — returns early (return statement on missing line). */
1446 if (!(cause & GLBL_INTR_MASK))
1449 cim_intr_handler(adapter);
1451 mps_intr_handler(adapter);
1453 ncsi_intr_handler(adapter);
1455 pl_intr_handler(adapter);
1457 smb_intr_handler(adapter);
1459 xgmac_intr_handler(adapter, 0);
1461 xgmac_intr_handler(adapter, 1);
1462 if (cause & XGMAC_KR0)
1463 xgmac_intr_handler(adapter, 2);
1464 if (cause & XGMAC_KR1)
1465 xgmac_intr_handler(adapter, 3);
1467 pcie_intr_handler(adapter);
1469 mem_intr_handler(adapter, MEM_MC);
1471 mem_intr_handler(adapter, MEM_EDC0);
1473 mem_intr_handler(adapter, MEM_EDC1);
1475 le_intr_handler(adapter);
1477 tp_intr_handler(adapter);
1479 ma_intr_handler(adapter);
1481 pmtx_intr_handler(adapter);
1483 pmrx_intr_handler(adapter);
1485 ulprx_intr_handler(adapter);
1486 if (cause & CPL_SWITCH)
1487 cplsw_intr_handler(adapter);
1489 sge_intr_handler(adapter);
1491 ulptx_intr_handler(adapter);
1493 /* Clear the interrupts just processed for which we are the master. */
1494 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1495 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1500 * t4_intr_enable - enable interrupts
1501 * @adapter: the adapter whose interrupts should be enabled
1503 * Enable PF-specific interrupts for the calling function and the top-level
1504 * interrupt concentrator for global interrupts. Interrupts are already
1505 * enabled at each module, here we just enable the roots of the interrupt
1508 * Note: this function should be called only when the driver manages
1509 * non PF-specific interrupts from the various HW modules. Only one PCI
1510 * function at a time should be doing this.
/*
 * Enable the interrupt roots: program the SGE error-interrupt enables,
 * enable PF-level interrupts for the calling function (identified via
 * PL_WHOAMI), and route global interrupts to this PF in PL_INT_MAP0.
 */
1512 void t4_intr_enable(struct adapter *adapter)
1514 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1516 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1517 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1518 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1519 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1520 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1521 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1522 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
1523 F_DBFIFO_HP_INT | F_DBFIFO_LP_INT |
1525 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1526 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1530 * t4_intr_disable - disable interrupts
1531 * @adapter: the adapter whose interrupts should be disabled
1533 * Disable interrupts. We only disable the top-level interrupt
1534 * concentrators. The caller must be a PCI function managing global
/*
 * Disable the interrupt roots for the calling PF: clear the PF interrupt
 * enable and remove this PF from the global interrupt map.
 */
1537 void t4_intr_disable(struct adapter *adapter)
1539 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1541 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1542 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1546 * hash_mac_addr - return the hash value of a MAC address
1547 * @addr: the 48-bit Ethernet MAC address
1549 * Hashes a MAC address according to the hash function used by HW inexact
1550 * (hash) address matching.
/*
 * Pack the 48-bit MAC into two 24-bit words for the HW hash computation.
 * NOTE(review): the folding/return logic (original lines 1556-1561) is
 * missing from this listing — do not modify without the full source.
 */
1552 static int hash_mac_addr(const u8 *addr)
1554 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1555 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1563 * t4_config_rss_range - configure a portion of the RSS mapping table
1564 * @adapter: the adapter
1565 * @mbox: mbox to use for the FW command
1566 * @viid: virtual interface whose RSS subtable is to be written
1567 * @start: start entry in the table to write
1568 * @n: how many table entries to write
1569 * @rspq: values for the response queue lookup table
1570 * @nrspq: number of values in @rspq
1572 * Programs the selected part of the VI's RSS mapping table with the
1573 * provided values. If @nrspq < @n the supplied values are used repeatedly
1574 * until the full table range is populated.
1576 * The caller must ensure the values in @rspq are in the range allowed for
/*
 * Program part of a VI's RSS indirection table via FW_RSS_IND_TBL_CMD,
 * 32 entries per mailbox command, cycling through @rspq (rsp wraps back to
 * rspq when rsp_end is reached — wrap lines are missing from this listing).
 * NOTE(review): loop structure, IQ packing for later slots, and error
 * handling are on missing lines; verify against the original file.
 */
1579 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1580 int start, int n, const u16 *rspq, unsigned int nrspq)
1583 const u16 *rsp = rspq;
1584 const u16 *rsp_end = rspq + nrspq;
1585 struct fw_rss_ind_tbl_cmd cmd;
1587 memset(&cmd, 0, sizeof(cmd));
1588 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
1589 FW_CMD_REQUEST | FW_CMD_WRITE |
1590 FW_RSS_IND_TBL_CMD_VIID(viid));
1591 cmd.retval_len16 = htonl(FW_LEN16(cmd));
1593 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
1595 int nq = min(n, 32);
1596 __be32 *qp = &cmd.iq0_to_iq2;
1598 cmd.niqid = htons(nq);
1599 cmd.startidx = htons(start);
/* Three response-queue IDs are packed per 32-bit table word. */
1607 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
1608 if (++rsp >= rsp_end)
1610 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
1611 if (++rsp >= rsp_end)
1613 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
1614 if (++rsp >= rsp_end)
1621 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
1629 * t4_config_glbl_rss - configure the global RSS mode
1630 * @adapter: the adapter
1631 * @mbox: mbox to use for the FW command
1632 * @mode: global RSS mode
1633 * @flags: mode-specific flags
1635 * Sets the global RSS mode.
/*
 * Set the global RSS mode via FW_RSS_GLB_CONFIG_CMD. Only MANUAL and
 * BASICVIRTUAL modes are supported; other modes presumably return -EINVAL
 * (the else branch is on a missing line — verify).
 */
1637 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1640 struct fw_rss_glb_config_cmd c;
1642 memset(&c, 0, sizeof(c));
1643 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1644 FW_CMD_REQUEST | FW_CMD_WRITE);
1645 c.retval_len16 = htonl(FW_LEN16(c));
1646 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1647 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1648 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1649 c.u.basicvirtual.mode_pkd =
1650 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1651 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1654 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1658 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
1659 * @adap: the adapter
1660 * @v4: holds the TCP/IP counter values
1661 * @v6: holds the TCP/IPv6 counter values
1663 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
1664 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
/*
 * Read TP's TCP MIB counters through the TP_MIB_INDEX/TP_MIB_DATA indirect
 * window. The v4 block reads starting at TCP_OUT_RST, the v6 block at
 * TCP_V6OUT_RST; STAT64 reassembles 64-bit counters from HI/LO halves.
 * The 'if (v4)'/'if (v6)' NULL guards are on missing lines per the embedded
 * numbering — verify before editing.
 */
1666 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1667 struct tp_tcp_stats *v6)
1669 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
1671 #define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
1672 #define STAT(x) val[STAT_IDX(x)]
1673 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
1676 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1677 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
1678 v4->tcpOutRsts = STAT(OUT_RST);
1679 v4->tcpInSegs = STAT64(IN_SEG);
1680 v4->tcpOutSegs = STAT64(OUT_SEG);
1681 v4->tcpRetransSegs = STAT64(RXT_SEG);
1684 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1685 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
1686 v6->tcpOutRsts = STAT(OUT_RST);
1687 v6->tcpInSegs = STAT64(IN_SEG);
1688 v6->tcpOutSegs = STAT64(OUT_SEG);
1689 v6->tcpRetransSegs = STAT64(RXT_SEG);
1697 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1698 * @adap: the adapter
1699 * @mtus: where to store the MTU values
1700 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
1702 * Reads the HW path MTU table.
/*
 * Read back the HW path-MTU table: writing MTUINDEX(0xff)|MTUVALUE(i)
 * selects entry i for read-back; the subsequent register read yields the
 * MTU value and its log2 width. mtu_log may be NULL (guard is on a missing
 * line per the embedded numbering).
 */
1704 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1709 for (i = 0; i < NMTUS; ++i) {
1710 t4_write_reg(adap, TP_MTU_TABLE,
1711 MTUINDEX(0xff) | MTUVALUE(i));
1712 v = t4_read_reg(adap, TP_MTU_TABLE);
1713 mtus[i] = MTUVALUE_GET(v);
1715 mtu_log[i] = MTUWIDTH_GET(v);
1720 * init_cong_ctrl - initialize congestion control parameters
1721 * @a: the alpha values for congestion control
1722 * @b: the beta values for congestion control
1724 * Initialize the congestion control parameters.
/*
 * Fill the congestion-control alpha (a[]) and beta (b[]) tables with fixed
 * defaults. NOTE(review): the bulk of the assignments (original lines
 * 1729-1752 and 1754-1755, 1759-1761) are missing from this listing — the
 * visible entries are only the head/tail of each table.
 */
1726 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
1728 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1753 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
1756 b[13] = b[14] = b[15] = b[16] = 3;
1757 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
1758 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
1763 /* The minimum additive increment value for the congestion control table */
1764 #define CC_MIN_INCR 2U
1767 * t4_load_mtus - write the MTU and congestion control HW tables
1768 * @adap: the adapter
1769 * @mtus: the values for the MTU table
1770 * @alpha: the values for the congestion control alpha parameter
1771 * @beta: the values for the congestion control beta parameter
1773 * Write the HW MTU table with the supplied MTUs and the high-speed
1774 * congestion control table with the supplied alpha, beta, and MTUs.
1775 * We write the two tables together because the additive increments
1776 * depend on the MTUs.
/*
 * Write the HW MTU table and the per-(MTU,window) congestion-control table
 * together, since each table entry's additive increment depends on the MTU.
 * log2 is rounded down unless mtu is closer to the next power of two.
 * NOTE(review): the inc computation's second max() operand and the CC_MIN_INCR
 * clamp context are on missing lines — verify against the original file.
 */
1778 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1779 const unsigned short *alpha, const unsigned short *beta)
1781 static const unsigned int avg_pkts[NCCTRL_WIN] = {
1782 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
1783 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
1784 28672, 40960, 57344, 81920, 114688, 163840, 229376
1789 for (i = 0; i < NMTUS; ++i) {
1790 unsigned int mtu = mtus[i];
1791 unsigned int log2 = fls(mtu);
1793 if (!(mtu & ((1 << log2) >> 2))) /* round */
1795 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
1796 MTUWIDTH(log2) | MTUVALUE(mtu));
1798 for (w = 0; w < NCCTRL_WIN; ++w) {
1801 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
1804 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
1805 (w << 16) | (beta[w] << 13) | inc);
1811 * get_mps_bg_map - return the buffer groups associated with a port
1812 * @adap: the adapter
1813 * @idx: the port index
1815 * Returns a bitmap indicating which MPS buffer groups are associated
1816 * with the given port. Bit i is set if buffer group i is used by the
/*
 * Map a port index to its MPS buffer-group bitmap based on the number of
 * ports configured in MPS_CMN_CTL: 1 port -> all four groups to port 0;
 * 2 ports -> two groups each; otherwise (4 ports, on a missing line)
 * presumably one group per port — verify.
 */
1819 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
1821 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
1824 return idx == 0 ? 0xf : 0;
1826 return idx < 2 ? (3 << (2 * idx)) : 0;
1831 * t4_get_port_stats - collect port statistics
1832 * @adap: the adapter
1833 * @idx: the port index
1834 * @p: the stats structure to fill
1836 * Collect statistics related to the given port from HW.
/*
 * Snapshot all MPS port statistics for port @idx into @p. Per-port counters
 * come from PORT_REG 64-bit stat pairs (GET_STAT); buffer-group overflow and
 * truncation counters are common MPS stats (GET_STAT_COM), gated by the
 * port's buffer-group bitmap so counters of groups owned by other ports
 * read as 0.
 */
1838 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
1840 u32 bgmap = get_mps_bg_map(adap, idx);
1842 #define GET_STAT(name) \
1843 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
1844 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
1846 p->tx_octets = GET_STAT(TX_PORT_BYTES);
1847 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
1848 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
1849 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
1850 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
1851 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
1852 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
1853 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
1854 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
1855 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
1856 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
1857 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
1858 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
1859 p->tx_drop = GET_STAT(TX_PORT_DROP);
1860 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
1861 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
1862 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
1863 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
1864 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
1865 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
1866 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
1867 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
1868 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
1870 p->rx_octets = GET_STAT(RX_PORT_BYTES);
1871 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
1872 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
1873 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
1874 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
1875 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
1876 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
1877 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
1878 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
1879 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
1880 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
1881 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
1882 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
1883 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
1884 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
1885 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
1886 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
1887 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
1888 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
1889 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
1890 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
1891 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
1892 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
1893 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
1894 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
1895 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
1896 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
/* Buffer-group counters: only report groups this port actually owns. */
1898 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
1899 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
1900 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
1901 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
1902 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
1903 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
1904 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
1905 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
1912 * t4_wol_magic_enable - enable/disable magic packet WoL
1913 * @adap: the adapter
1914 * @port: the physical port index
1915 * @addr: MAC address expected in magic packets, %NULL to disable
1917 * Enables/disables magic packet wake-on-LAN for the selected port.
/*
 * Enable/disable magic-packet WoL on a port: when @addr is non-NULL,
 * program the expected MAC (bytes 2-5 in the LO register, 0-1 in HI), then
 * set or clear MAGICEN accordingly. NOTE(review): the 'if (addr)' guard
 * around the MACID writes is on a missing line — verify.
 */
1919 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
1923 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
1924 (addr[2] << 24) | (addr[3] << 16) |
1925 (addr[4] << 8) | addr[5]);
1926 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
1927 (addr[0] << 8) | addr[1]);
1929 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
1930 addr ? MAGICEN : 0);
1934 * t4_wol_pat_enable - enable/disable pattern-based WoL
1935 * @adap: the adapter
1936 * @port: the physical port index
1937 * @map: bitmap of which HW pattern filters to set
1938 * @mask0: byte mask for bytes 0-63 of a packet
1939 * @mask1: byte mask for bytes 64-127 of a packet
1940 * @crc: Ethernet CRC for selected bytes
1941 * @enable: enable/disable switch
1943 * Sets the pattern filters indicated in @map to mask out the bytes
1944 * specified in @mask0/@mask1 in received packets and compare the CRC of
1945 * the resulting packet against @crc. If @enable is %true pattern-based
1946 * WoL is enabled, otherwise disabled.
/*
 * Configure pattern-match WoL: for each filter selected in @map, write the
 * byte masks (DATA0-3) and CRC via the EPIO window, polling BUSY after each
 * flush. Finally sets PATEN when @enable. NOTE(review): the disable path,
 * error returns for BUSY/map-out-of-range, and the DATA0 low-mask write
 * ordering are on missing lines — verify against the original file.
 */
1948 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
1949 u64 mask0, u64 mask1, unsigned int crc, bool enable)
1954 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
1961 #define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
1963 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
1964 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
1965 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
1967 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
1971 /* write byte masks */
1972 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
1973 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
1974 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
1975 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
/* CRC slots live 32 entries above the mask slots in EPIO space. */
1979 t4_write_reg(adap, EPIO_REG(DATA0), crc);
1980 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
1981 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
1982 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
1987 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
/*
 * Fill the common header of a simple FW command struct: opcode, REQUEST
 * flag, READ/WRITE direction, and length in 16-byte units. do/while(0)
 * makes the macro statement-safe.
 */
1991 #define INIT_CMD(var, cmd, rd_wr) do { \
1992 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
1993 FW_CMD_REQUEST | FW_CMD_##rd_wr); \
1994 (var).retval_len16 = htonl(FW_LEN16(var)); \
/*
 * Write a 32-bit value to an address in the FIRMWARE address space via a
 * FW_LDST_CMD mailbox command. Returns the mailbox status.
 */
1997 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2000 struct fw_ldst_cmd c;
2002 memset(&c, 0, sizeof(c));
2003 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
2005 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
2006 c.cycles_to_len16 = htonl(FW_LEN16(c));
2007 c.u.addrval.addr = htonl(addr);
2008 c.u.addrval.val = htonl(val);
2010 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2014 * t4_mem_win_read_len - read memory through PCIE memory window
2015 * @adap: the adapter
2016 * @addr: address of first byte requested aligned on 32b.
2017 * @data: len bytes to hold the data read
2018 * @len: amount of data to read from window. Must be <=
2019 * MEMWIN0_APERATURE after adjusting for 16B alignment
2020 * requirements of the the memory window.
2022 * Read len bytes of data from MC starting at @addr.
/*
 * Read @len bytes from MC through PCIE memory window 0: point the window at
 * the 16-byte-aligned base of @addr (write + read-back flush), then copy
 * 32-bit words from the window aperture. Rejects unaligned addresses or
 * reads that would overrun the aperture. NOTE(review): 'off' computation and
 * the return statement are on missing lines — verify.
 */
2024 int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
2030 * Align on a 16B boundary.
2033 if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
2036 t4_write_reg(adap, A_PCIE_MEM_ACCESS_OFFSET, addr & ~15);
2037 t4_read_reg(adap, A_PCIE_MEM_ACCESS_OFFSET);
2039 for (i = 0; i < len; i += 4)
2040 *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i));
2046 * t4_mdio_rd - read a PHY register through MDIO
2047 * @adap: the adapter
2048 * @mbox: mailbox to use for the FW command
2049 * @phy_addr: the PHY address
2050 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2051 * @reg: the register to read
2052 * @valp: where to store the value
2054 * Issues a FW command through the given mailbox to read a PHY register.
/*
 * Read a PHY register over MDIO via a FW_LDST_CMD (MDIO address space).
 * On mailbox success the register value is extracted from the returned
 * command. NOTE(review): the 'if (ret == 0)' guard before *valp and the
 * final return are on missing lines — verify.
 */
2056 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2057 unsigned int mmd, unsigned int reg, u16 *valp)
2060 struct fw_ldst_cmd c;
2062 memset(&c, 0, sizeof(c));
2063 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2064 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2065 c.cycles_to_len16 = htonl(FW_LEN16(c));
2066 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2067 FW_LDST_CMD_MMD(mmd));
2068 c.u.mdio.raddr = htons(reg);
2070 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2072 *valp = ntohs(c.u.mdio.rval);
2077 * t4_mdio_wr - write a PHY register through MDIO
2078 * @adap: the adapter
2079 * @mbox: mailbox to use for the FW command
2080 * @phy_addr: the PHY address
2081 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2082 * @reg: the register to write
2083 * @valp: value to write
2085 * Issues a FW command through the given mailbox to write a PHY register.
/*
 * Write a PHY register over MDIO via a FW_LDST_CMD (MDIO address space).
 * Mirrors t4_mdio_rd but with FW_CMD_WRITE and the value preloaded in rval;
 * no response payload is needed, so the mailbox reply buffer is NULL.
 */
2087 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2088 unsigned int mmd, unsigned int reg, u16 val)
2090 struct fw_ldst_cmd c;
2092 memset(&c, 0, sizeof(c));
2093 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2094 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2095 c.cycles_to_len16 = htonl(FW_LEN16(c));
2096 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2097 FW_LDST_CMD_MMD(mmd));
2098 c.u.mdio.raddr = htons(reg);
2099 c.u.mdio.rval = htons(val);
2101 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2105 * t4_fw_hello - establish communication with FW
2106 * @adap: the adapter
2107 * @mbox: mailbox to use for the FW command
2108 * @evt_mbox: mailbox to receive async FW events
2109 * @master: specifies the caller's willingness to be the device master
2110 * @state: returns the current device state
2112 * Issues a command to establish communication with FW.
/*
 * FW HELLO handshake: advertises the caller's master willingness and async
 * event mailbox; on success decodes the device state (INIT/ERR/UNINIT) from
 * the returned err_to_mbasyncnot field into *state (if non-NULL).
 * NOTE(review): the final 'return ret;' is on a missing line — verify.
 */
2114 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2115 enum dev_master master, enum dev_state *state)
2118 struct fw_hello_cmd c;
2120 INIT_CMD(c, HELLO, WRITE);
2121 c.err_to_mbasyncnot = htonl(
2122 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2123 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2124 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
2125 FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
2127 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2128 if (ret == 0 && state) {
2129 u32 v = ntohl(c.err_to_mbasyncnot);
2130 if (v & FW_HELLO_CMD_INIT)
2131 *state = DEV_STATE_INIT;
2132 else if (v & FW_HELLO_CMD_ERR)
2133 *state = DEV_STATE_ERR;
2135 *state = DEV_STATE_UNINIT;
2141 * t4_fw_bye - end communication with FW
2142 * @adap: the adapter
2143 * @mbox: mailbox to use for the FW command
2145 * Issues a command to terminate communication with FW.
/* Send FW BYE to end communication with the firmware; returns mbox status. */
2147 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2149 struct fw_bye_cmd c;
2151 INIT_CMD(c, BYE, WRITE);
2152 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2156 * t4_init_cmd - ask FW to initialize the device
2157 * @adap: the adapter
2158 * @mbox: mailbox to use for the FW command
2160 * Issues a command to FW to partially initialize the device. This
2161 * performs initialization that generally doesn't depend on user input.
/* Send FW INITIALIZE to perform user-independent device init; mbox status. */
2163 int t4_early_init(struct adapter *adap, unsigned int mbox)
2165 struct fw_initialize_cmd c;
2167 INIT_CMD(c, INITIALIZE, WRITE);
2168 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2172 * t4_fw_reset - issue a reset to FW
2173 * @adap: the adapter
2174 * @mbox: mailbox to use for the FW command
2175 * @reset: specifies the type of reset to perform
2177 * Issues a reset command of the specified type to FW.
/*
 * Send FW RESET with the requested reset type in the command's val field;
 * returns the mailbox status.
 */
2179 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2181 struct fw_reset_cmd c;
2183 INIT_CMD(c, RESET, WRITE);
2184 c.val = htonl(reset);
2185 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2189 * t4_query_params - query FW or device parameters
2190 * @adap: the adapter
2191 * @mbox: mailbox to use for the FW command
2194 * @nparams: the number of parameters
2195 * @params: the parameter names
2196 * @val: the parameter values
2198 * Reads the value of FW or device parameters. Up to 7 parameters can be
2201 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2202 unsigned int vf, unsigned int nparams, const u32 *params,
2206 struct fw_params_cmd c;
2207 __be32 *p = &c.param[0].mnem;
2212 memset(&c, 0, sizeof(c));
2213 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2214 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2215 FW_PARAMS_CMD_VFN(vf));
2216 c.retval_len16 = htonl(FW_LEN16(c));
2217 for (i = 0; i < nparams; i++, p += 2)
2218 *p = htonl(*params++);
2220 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2222 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2228 * t4_set_params - sets FW or device parameters
2229 * @adap: the adapter
2230 * @mbox: mailbox to use for the FW command
2233 * @nparams: the number of parameters
2234 * @params: the parameter names
2235 * @val: the parameter values
2237 * Sets the value of FW or device parameters. Up to 7 parameters can be
2238 * specified at once.
2240 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2241 unsigned int vf, unsigned int nparams, const u32 *params,
2244 struct fw_params_cmd c;
2245 __be32 *p = &c.param[0].mnem;
2250 memset(&c, 0, sizeof(c));
2251 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2252 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2253 FW_PARAMS_CMD_VFN(vf));
2254 c.retval_len16 = htonl(FW_LEN16(c));
2256 *p++ = htonl(*params++);
2257 *p++ = htonl(*val++);
2260 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2264 * t4_cfg_pfvf - configure PF/VF resource limits
2265 * @adap: the adapter
2266 * @mbox: mailbox to use for the FW command
2267 * @pf: the PF being configured
2268 * @vf: the VF being configured
2269 * @txq: the max number of egress queues
2270 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2271 * @rxqi: the max number of interrupt-capable ingress queues
2272 * @rxq: the max number of interruptless ingress queues
2273 * @tc: the PCI traffic class
2274 * @vi: the max number of virtual interfaces
2275 * @cmask: the channel access rights mask for the PF/VF
2276 * @pmask: the port access rights mask for the PF/VF
2277 * @nexact: the maximum number of exact MPS filters
2278 * @rcaps: read capabilities
2279 * @wxcaps: write/execute capabilities
2281 * Configures resource limits and capabilities for a physical or virtual
2284 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2285 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
2286 unsigned int rxqi, unsigned int rxq, unsigned int tc,
2287 unsigned int vi, unsigned int cmask, unsigned int pmask,
2288 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
2290 struct fw_pfvf_cmd c;
2292 memset(&c, 0, sizeof(c));
2293 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
2294 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
2295 FW_PFVF_CMD_VFN(vf));
2296 c.retval_len16 = htonl(FW_LEN16(c));
2297 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
2298 FW_PFVF_CMD_NIQ(rxq));
2299 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
2300 FW_PFVF_CMD_PMASK(pmask) |
2301 FW_PFVF_CMD_NEQ(txq));
2302 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
2303 FW_PFVF_CMD_NEXACTF(nexact));
2304 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
2305 FW_PFVF_CMD_WX_CAPS(wxcaps) |
2306 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
2307 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2311 * t4_alloc_vi - allocate a virtual interface
2312 * @adap: the adapter
2313 * @mbox: mailbox to use for the FW command
2314 * @port: physical port associated with the VI
2315 * @pf: the PF owning the VI
2316 * @vf: the VF owning the VI
2317 * @nmac: number of MAC addresses needed (1 to 5)
2318 * @mac: the MAC addresses of the VI
2319 * @rss_size: size of RSS table slice associated with this VI
2321 * Allocates a virtual interface for the given physical port. If @mac is
2322 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
2323 * @mac should be large enough to hold @nmac Ethernet addresses, they are
2324 * stored consecutively so the space needed is @nmac * 6 bytes.
2325 * Returns a negative error number or the non-negative VI id.
2327 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
2328 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
2329 unsigned int *rss_size)
2334 memset(&c, 0, sizeof(c));
2335 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2336 FW_CMD_WRITE | FW_CMD_EXEC |
2337 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
2338 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
2339 c.portid_pkd = FW_VI_CMD_PORTID(port);
2342 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2347 memcpy(mac, c.mac, sizeof(c.mac));
2350 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
2352 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
2354 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
2356 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
2360 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
2361 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
2365 * t4_set_rxmode - set Rx properties of a virtual interface
2366 * @adap: the adapter
2367 * @mbox: mailbox to use for the FW command
2369 * @mtu: the new MTU or -1
2370 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2371 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2372 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
2373 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
2374 * @sleep_ok: if true we may sleep while awaiting command completion
2376 * Sets Rx properties of a virtual interface.
2378 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
2379 int mtu, int promisc, int all_multi, int bcast, int vlanex,
2382 struct fw_vi_rxmode_cmd c;
2384 /* convert to FW values */
2386 mtu = FW_RXMODE_MTU_NO_CHG;
2388 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
2390 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
2392 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
2394 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
2396 memset(&c, 0, sizeof(c));
2397 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
2398 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
2399 c.retval_len16 = htonl(FW_LEN16(c));
2400 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
2401 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2402 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2403 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
2404 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
2405 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2409 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
2410 * @adap: the adapter
2411 * @mbox: mailbox to use for the FW command
2413 * @free: if true any existing filters for this VI id are first removed
2414 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
2415 * @addr: the MAC address(es)
2416 * @idx: where to store the index of each allocated filter
2417 * @hash: pointer to hash address filter bitmap
2418 * @sleep_ok: call is allowed to sleep
2420 * Allocates an exact-match filter for each of the supplied addresses and
2421 * sets it to the corresponding address. If @idx is not %NULL it should
2422 * have at least @naddr entries, each of which will be set to the index of
2423 * the filter allocated for the corresponding MAC address. If a filter
2424 * could not be allocated for an address its index is set to 0xffff.
2425 * If @hash is not %NULL addresses that fail to allocate an exact filter
2426 * are hashed and update the hash filter bitmap pointed at by @hash.
2428 * Returns a negative error number or the number of filters allocated.
2430 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2431 unsigned int viid, bool free, unsigned int naddr,
2432 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
2435 struct fw_vi_mac_cmd c;
2436 struct fw_vi_mac_exact *p;
2441 memset(&c, 0, sizeof(c));
2442 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2443 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
2444 FW_VI_MAC_CMD_VIID(viid));
2445 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
2446 FW_CMD_LEN16((naddr + 2) / 2));
2448 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2449 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2450 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
2451 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
2454 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
2458 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2459 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2462 idx[i] = index >= NEXACT_MAC ? 0xffff : index;
2463 if (index < NEXACT_MAC)
2466 *hash |= (1ULL << hash_mac_addr(addr[i]));
2472 * t4_change_mac - modifies the exact-match filter for a MAC address
2473 * @adap: the adapter
2474 * @mbox: mailbox to use for the FW command
2476 * @idx: index of existing filter for old value of MAC address, or -1
2477 * @addr: the new MAC address value
2478 * @persist: whether a new MAC allocation should be persistent
2479 * @add_smt: if true also add the address to the HW SMT
2481 * Modifies an exact-match filter and sets it to the new MAC address.
2482 * Note that in general it is not possible to modify the value of a given
2483 * filter so the generic way to modify an address filter is to free the one
2484 * being used by the old address value and allocate a new filter for the
2485 * new address value. @idx can be -1 if the address is a new addition.
2487 * Returns a negative error number or the index of the filter with the new
2490 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2491 int idx, const u8 *addr, bool persist, bool add_smt)
2494 struct fw_vi_mac_cmd c;
2495 struct fw_vi_mac_exact *p = c.u.exact;
2497 if (idx < 0) /* new allocation */
2498 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
2499 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
2501 memset(&c, 0, sizeof(c));
2502 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2503 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2504 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
2505 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2506 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
2507 FW_VI_MAC_CMD_IDX(idx));
2508 memcpy(p->macaddr, addr, sizeof(p->macaddr));
2510 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2512 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2513 if (ret >= NEXACT_MAC)
2520 * t4_set_addr_hash - program the MAC inexact-match hash filter
2521 * @adap: the adapter
2522 * @mbox: mailbox to use for the FW command
2524 * @ucast: whether the hash filter should also match unicast addresses
2525 * @vec: the value to be written to the hash filter
2526 * @sleep_ok: call is allowed to sleep
2528 * Sets the 64-bit inexact-match hash filter for a virtual interface.
2530 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
2531 bool ucast, u64 vec, bool sleep_ok)
2533 struct fw_vi_mac_cmd c;
2535 memset(&c, 0, sizeof(c));
2536 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2537 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
2538 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
2539 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
2541 c.u.hash.hashvec = cpu_to_be64(vec);
2542 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2546 * t4_enable_vi - enable/disable a virtual interface
2547 * @adap: the adapter
2548 * @mbox: mailbox to use for the FW command
2550 * @rx_en: 1=enable Rx, 0=disable Rx
2551 * @tx_en: 1=enable Tx, 0=disable Tx
2553 * Enables/disables a virtual interface.
2555 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2556 bool rx_en, bool tx_en)
2558 struct fw_vi_enable_cmd c;
2560 memset(&c, 0, sizeof(c));
2561 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2562 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2563 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
2564 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
2565 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2569 * t4_identify_port - identify a VI's port by blinking its LED
2570 * @adap: the adapter
2571 * @mbox: mailbox to use for the FW command
2573 * @nblinks: how many times to blink LED at 2.5 Hz
2575 * Identifies a VI's port by blinking its LED.
2577 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2578 unsigned int nblinks)
2580 struct fw_vi_enable_cmd c;
2582 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2583 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2584 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
2585 c.blinkdur = htons(nblinks);
2586 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2590 * t4_iq_free - free an ingress queue and its FLs
2591 * @adap: the adapter
2592 * @mbox: mailbox to use for the FW command
2593 * @pf: the PF owning the queues
2594 * @vf: the VF owning the queues
2595 * @iqtype: the ingress queue type
2596 * @iqid: ingress queue id
2597 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2598 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2600 * Frees an ingress queue and its associated FLs, if any.
2602 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2603 unsigned int vf, unsigned int iqtype, unsigned int iqid,
2604 unsigned int fl0id, unsigned int fl1id)
2608 memset(&c, 0, sizeof(c));
2609 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2610 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2612 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
2613 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
2614 c.iqid = htons(iqid);
2615 c.fl0id = htons(fl0id);
2616 c.fl1id = htons(fl1id);
2617 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2621 * t4_eth_eq_free - free an Ethernet egress queue
2622 * @adap: the adapter
2623 * @mbox: mailbox to use for the FW command
2624 * @pf: the PF owning the queue
2625 * @vf: the VF owning the queue
2626 * @eqid: egress queue id
2628 * Frees an Ethernet egress queue.
2630 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2631 unsigned int vf, unsigned int eqid)
2633 struct fw_eq_eth_cmd c;
2635 memset(&c, 0, sizeof(c));
2636 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2637 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
2638 FW_EQ_ETH_CMD_VFN(vf));
2639 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
2640 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
2641 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2645 * t4_ctrl_eq_free - free a control egress queue
2646 * @adap: the adapter
2647 * @mbox: mailbox to use for the FW command
2648 * @pf: the PF owning the queue
2649 * @vf: the VF owning the queue
2650 * @eqid: egress queue id
2652 * Frees a control egress queue.
2654 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2655 unsigned int vf, unsigned int eqid)
2657 struct fw_eq_ctrl_cmd c;
2659 memset(&c, 0, sizeof(c));
2660 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2661 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
2662 FW_EQ_CTRL_CMD_VFN(vf));
2663 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
2664 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
2665 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2669 * t4_ofld_eq_free - free an offload egress queue
2670 * @adap: the adapter
2671 * @mbox: mailbox to use for the FW command
2672 * @pf: the PF owning the queue
2673 * @vf: the VF owning the queue
2674 * @eqid: egress queue id
2676 * Frees a control egress queue.
2678 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2679 unsigned int vf, unsigned int eqid)
2681 struct fw_eq_ofld_cmd c;
2683 memset(&c, 0, sizeof(c));
2684 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2685 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
2686 FW_EQ_OFLD_CMD_VFN(vf));
2687 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
2688 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
2689 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2693 * t4_handle_fw_rpl - process a FW reply message
2694 * @adap: the adapter
2695 * @rpl: start of the FW message
2697 * Processes a FW message, such as link state change messages.
2699 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
2701 u8 opcode = *(const u8 *)rpl;
2703 if (opcode == FW_PORT_CMD) { /* link/module state change message */
2704 int speed = 0, fc = 0;
2705 const struct fw_port_cmd *p = (void *)rpl;
2706 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
2707 int port = adap->chan_map[chan];
2708 struct port_info *pi = adap2pinfo(adap, port);
2709 struct link_config *lc = &pi->link_cfg;
2710 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
2711 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
2712 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
2714 if (stat & FW_PORT_CMD_RXPAUSE)
2716 if (stat & FW_PORT_CMD_TXPAUSE)
2718 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
2720 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
2722 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
2723 speed = SPEED_10000;
2725 if (link_ok != lc->link_ok || speed != lc->speed ||
2726 fc != lc->fc) { /* something changed */
2727 lc->link_ok = link_ok;
2730 t4_os_link_changed(adap, port, link_ok);
2732 if (mod != pi->mod_type) {
2734 t4_os_portmod_changed(adap, port);
2740 static void __devinit get_pci_mode(struct adapter *adapter,
2741 struct pci_params *p)
2745 if (pci_is_pcie(adapter->pdev)) {
2746 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
2747 p->speed = val & PCI_EXP_LNKSTA_CLS;
2748 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
2753 * init_link_config - initialize a link's SW state
2754 * @lc: structure holding the link state
2755 * @caps: link capabilities
2757 * Initializes the SW state maintained for each link, including the link's
2758 * capabilities and default speed/flow-control/autonegotiation settings.
2760 static void __devinit init_link_config(struct link_config *lc,
2763 lc->supported = caps;
2764 lc->requested_speed = 0;
2766 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
2767 if (lc->supported & FW_PORT_CAP_ANEG) {
2768 lc->advertising = lc->supported & ADVERT_MASK;
2769 lc->autoneg = AUTONEG_ENABLE;
2770 lc->requested_fc |= PAUSE_AUTONEG;
2772 lc->advertising = 0;
2773 lc->autoneg = AUTONEG_DISABLE;
2777 int t4_wait_dev_ready(struct adapter *adap)
2779 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
2782 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
2785 static int __devinit get_flash_params(struct adapter *adap)
2790 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
2792 ret = sf1_read(adap, 3, 0, 1, &info);
2793 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
2797 if ((info & 0xff) != 0x20) /* not a Numonix flash */
2799 info >>= 16; /* log2 of size */
2800 if (info >= 0x14 && info < 0x18)
2801 adap->params.sf_nsec = 1 << (info - 16);
2802 else if (info == 0x18)
2803 adap->params.sf_nsec = 64;
2806 adap->params.sf_size = 1 << info;
2807 adap->params.sf_fw_start =
2808 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
2813 * t4_prep_adapter - prepare SW and HW for operation
2814 * @adapter: the adapter
2815 * @reset: if true perform a HW reset
2817 * Initialize adapter SW state for the various HW modules, set initial
2818 * values for some adapter tunables, take PHYs out of reset, and
2819 * initialize the MDIO interface.
2821 int __devinit t4_prep_adapter(struct adapter *adapter)
2825 ret = t4_wait_dev_ready(adapter);
2829 get_pci_mode(adapter, &adapter->params.pci);
2830 adapter->params.rev = t4_read_reg(adapter, PL_REV);
2832 ret = get_flash_params(adapter);
2834 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
2838 ret = get_vpd_params(adapter, &adapter->params.vpd);
2842 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
2845 * Default port for debugging in case we can't reach FW.
2847 adapter->params.nports = 1;
2848 adapter->params.portvec = 1;
2852 int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
2856 struct fw_port_cmd c;
2857 struct fw_rss_vi_config_cmd rvc;
2859 memset(&c, 0, sizeof(c));
2860 memset(&rvc, 0, sizeof(rvc));
2862 for_each_port(adap, i) {
2863 unsigned int rss_size;
2864 struct port_info *p = adap2pinfo(adap, i);
2866 while ((adap->params.portvec & (1 << j)) == 0)
2869 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
2870 FW_CMD_REQUEST | FW_CMD_READ |
2871 FW_PORT_CMD_PORTID(j));
2872 c.action_to_len16 = htonl(
2873 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
2875 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2879 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
2886 p->rss_size = rss_size;
2887 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
2888 memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
2889 adap->port[i]->dev_id = j;
2891 ret = ntohl(c.u.info.lstatus_to_modtype);
2892 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
2893 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
2894 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
2895 p->mod_type = FW_PORT_MOD_TYPE_NA;
2897 rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2898 FW_CMD_REQUEST | FW_CMD_READ |
2899 FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
2900 rvc.retval_len16 = htonl(FW_LEN16(rvc));
2901 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
2904 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
2906 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));