1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2007 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
30 * rx_ring_num : This can be used to program the number of receive rings used
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 2(MSI_X). Default value is '2(MSI_X)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
45 * napi: This parameter used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
53 ************************************************************************/
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
74 #include <linux/tcp.h>
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
80 #include <asm/div64.h>
85 #include "s2io-regs.h"
87 #define DRV_VERSION "2.0.26.1"
89 /* S2io Driver name & version. */
90 static char s2io_driver_name[] = "Neterion";
91 static char s2io_driver_version[] = DRV_VERSION;
/* Per-buffer-mode RxD geometry, indexed presumably by nic->rxd_mode
 * (RXD_MODE_1 vs RXD_MODE_3B) -- TODO confirm index mapping.
 * rxd_size is used below as the byte stride between RxDs in a block;
 * rxd_count is the number of RxDs that fit in one block. */
93 static int rxd_size[2] = {32,48};
94 static int rxd_count[2] = {127,85};
/* Non-zero when the RxD has been returned to the host: the adapter's
 * ownership bit (RXD_OWN_XENA) is clear and the end-of-block marker is
 * absent from Control_2.
 * NOTE(review): braces/return are elided from this view of the file. */
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
100 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
107 * Cards with following subsystem_id have a link state indication
108 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
109 * macro below identifies these cards given the subsystem_id.
111 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
112 (dev_type == XFRAME_I_DEVICE) ? \
113 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
114 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
/* Link is considered up when neither the local nor the remote RMAC
 * fault bit is set in the adapter status register value. */
116 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
117 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claims the tasklet; evaluates non-zero when the bit was
 * already set (tasklet already in use).  Expects 'sp' in caller scope. */
118 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* Classifies the Rx buffer fill level of @ring from the current count of
 * posted buffers (@rxb_size).  The branch bodies and final return are
 * elided in this view; presumably they yield PANIC/LOW/0 levels used by
 * the Rx refill path -- TODO confirm against the full file. */
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
123 struct mac_info *mac_control;
125 mac_control = &sp->mac_control;
126 if (rxb_size <= rxd_count[sp->rxd_mode])
128 else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
133 /* Ethtool related variables and Macros. */
/* Names of the adapter self-tests reported through ethtool; "(offline)"
 * tests require the interface to be down, "(online)" ones do not. */
134 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
135 "Register test\t(offline)",
136 "Eeprom test\t(offline)",
137 "Link test\t(online)",
138 "RLDRAM test\t(offline)",
139 "BIST Test\t(offline)"
/* Key strings for the Xena-family hardware statistics exposed via
 * 'ethtool -S'.  Order presumably mirrors the hardware stat_block
 * layout -- confirm before inserting or reordering entries.
 * NOTE(review): many entries are elided in this view of the file. */
142 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
144 {"tmac_data_octets"},
148 {"tmac_pause_ctrl_frms"},
152 {"tmac_any_err_frms"},
153 {"tmac_ttl_less_fb_octets"},
154 {"tmac_vld_ip_octets"},
162 {"rmac_data_octets"},
163 {"rmac_fcs_err_frms"},
165 {"rmac_vld_mcst_frms"},
166 {"rmac_vld_bcst_frms"},
167 {"rmac_in_rng_len_err_frms"},
168 {"rmac_out_rng_len_err_frms"},
170 {"rmac_pause_ctrl_frms"},
171 {"rmac_unsup_ctrl_frms"},
173 {"rmac_accepted_ucst_frms"},
174 {"rmac_accepted_nucst_frms"},
175 {"rmac_discarded_frms"},
176 {"rmac_drop_events"},
177 {"rmac_ttl_less_fb_octets"},
179 {"rmac_usized_frms"},
180 {"rmac_osized_frms"},
182 {"rmac_jabber_frms"},
183 {"rmac_ttl_64_frms"},
184 {"rmac_ttl_65_127_frms"},
185 {"rmac_ttl_128_255_frms"},
186 {"rmac_ttl_256_511_frms"},
187 {"rmac_ttl_512_1023_frms"},
188 {"rmac_ttl_1024_1518_frms"},
196 {"rmac_err_drp_udp"},
197 {"rmac_xgmii_err_sym"},
215 {"rmac_xgmii_data_err_cnt"},
216 {"rmac_xgmii_ctrl_err_cnt"},
217 {"rmac_accepted_ip"},
221 {"new_rd_req_rtry_cnt"},
223 {"wr_rtry_rd_ack_cnt"},
226 {"new_wr_req_rtry_cnt"},
229 {"rd_rtry_wr_ack_cnt"},
/* Additional hardware statistic keys available only on the enhanced
 * (Xframe II / Herc) adapters; appended after the Xena keys for
 * XFRAME_II_STAT_LEN.  Some entries are elided in this view. */
239 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
240 {"rmac_ttl_1519_4095_frms"},
241 {"rmac_ttl_4096_8191_frms"},
242 {"rmac_ttl_8192_max_frms"},
243 {"rmac_ttl_gt_max_frms"},
244 {"rmac_osized_alt_frms"},
245 {"rmac_jabber_alt_frms"},
246 {"rmac_gt_max_alt_frms"},
248 {"rmac_len_discard"},
249 {"rmac_fcs_discard"},
252 {"rmac_red_discard"},
253 {"rmac_rts_discard"},
254 {"rmac_ingm_full_discard"},
/* Software (driver-maintained) statistic keys, reported after the
 * hardware keys in 'ethtool -S' output.  The first entry is a section
 * banner, not a counter.  Many entries are elided in this view. */
258 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
259 {"\n DRIVER STATISTICS"},
260 {"single_bit_ecc_errs"},
261 {"double_bit_ecc_errs"},
274 ("alarm_transceiver_temp_high"),
275 ("alarm_transceiver_temp_low"),
276 ("alarm_laser_bias_current_high"),
277 ("alarm_laser_bias_current_low"),
278 ("alarm_laser_output_power_high"),
279 ("alarm_laser_output_power_low"),
280 ("warn_transceiver_temp_high"),
281 ("warn_transceiver_temp_low"),
282 ("warn_laser_bias_current_high"),
283 ("warn_laser_bias_current_low"),
284 ("warn_laser_output_power_high"),
285 ("warn_laser_output_power_low"),
286 ("lro_aggregated_pkts"),
287 ("lro_flush_both_count"),
288 ("lro_out_of_sequence_pkts"),
289 ("lro_flush_due_to_max_pkts"),
290 ("lro_avg_aggr_pkts"),
291 ("mem_alloc_fail_cnt"),
292 ("pci_map_fail_cnt"),
293 ("watchdog_timer_cnt"),
300 ("tx_tcode_buf_abort_cnt"),
301 ("tx_tcode_desc_abort_cnt"),
302 ("tx_tcode_parity_err_cnt"),
303 ("tx_tcode_link_loss_cnt"),
304 ("tx_tcode_list_proc_err_cnt"),
305 ("rx_tcode_parity_err_cnt"),
306 ("rx_tcode_abort_cnt"),
307 ("rx_tcode_parity_abort_cnt"),
308 ("rx_tcode_rda_fail_cnt"),
309 ("rx_tcode_unkn_prot_cnt"),
310 ("rx_tcode_fcs_err_cnt"),
311 ("rx_tcode_buf_size_err_cnt"),
312 ("rx_tcode_rxd_corrupt_cnt"),
313 ("rx_tcode_unkn_err_cnt"),
321 {"mac_tmac_err_cnt"},
322 {"mac_rmac_err_cnt"},
323 {"xgxs_txgxs_err_cnt"},
324 {"xgxs_rxgxs_err_cnt"},
326 {"prc_pcix_err_cnt"},
/* Entry counts derived from the key tables above (sizeof table divided
 * by the fixed per-string width).  Xframe II reports the Xena keys plus
 * the enhanced keys; both device types append the driver keys.
 * NOTE(review): the continuation line of S2IO_ENHANCED_STAT_LEN is
 * elided in this view. */
333 #define S2IO_XENA_STAT_LEN sizeof(ethtool_xena_stats_keys)/ ETH_GSTRING_LEN
334 #define S2IO_ENHANCED_STAT_LEN sizeof(ethtool_enhanced_stats_keys)/ \
336 #define S2IO_DRIVER_STAT_LEN sizeof(ethtool_driver_stats_keys)/ ETH_GSTRING_LEN
338 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
339 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
341 #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
342 #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
/* Self-test count and total string-buffer length for ethtool -t. */
344 #define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
345 #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
/*
 * S2IO_TIMER_CONF - initialize and arm a kernel timer.
 * @timer:  struct timer_list object (named by value, not a pointer)
 * @handle: timeout callback stored in timer.function
 * @arg:    value stored in timer.data (cast to unsigned long)
 * @exp:    expiry delay in jiffies, relative to now
 *
 * Wrapped in do { } while (0) so the four statements behave as a single
 * statement: the original bare-statement form silently guarded only the
 * first statement when used inside an unbraced if/else.  Arguments are
 * parenthesized so expression arguments expand safely.  Call sites keep
 * their trailing semicolon unchanged.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long)(arg);	\
		mod_timer(&timer, (jiffies + (exp)));	\
	} while (0)
/* ethtool/VLAN callback: records the VLAN group for the device.  The
 * group assignment itself is elided in this view; the update is done
 * under tx_lock with interrupts disabled (flags declared on an elided
 * line). */
354 static void s2io_vlan_rx_register(struct net_device *dev,
355 struct vlan_group *grp)
357 struct s2io_nic *nic = dev->priv;
360 spin_lock_irqsave(&nic->tx_lock, flags);
362 spin_unlock_irqrestore(&nic->tx_lock, flags);
365 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
366 static int vlan_strip_flag;
369 * Constants to be programmed into the Xena's registers, to configure
/* DTX (serdes/XAUI) configuration sequences written one value at a time
 * to bar0->dtx_control by init_nic().  Herc (Xframe II) table first,
 * then the Xena table.  The END_SIGN terminators and some entries are
 * elided in this view. */
374 static const u64 herc_act_dtx_cfg[] = {
376 0x8000051536750000ULL, 0x80000515367500E0ULL,
378 0x8000051536750004ULL, 0x80000515367500E4ULL,
380 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
382 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
384 0x801205150D440000ULL, 0x801205150D4400E0ULL,
386 0x801205150D440004ULL, 0x801205150D4400E4ULL,
388 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
390 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
395 static const u64 xena_dtx_cfg[] = {
397 0x8000051500000000ULL, 0x80000515000000E0ULL,
399 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
401 0x8001051500000000ULL, 0x80010515000000E0ULL,
403 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
405 0x8002051500000000ULL, 0x80020515000000E0ULL,
407 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
412 * Constants for Fixing the MacAddress problem seen mostly on
/* Register write sequence used to work around the MAC-address problem
 * described above; terminator entry (END_SIGN) is elided in this view. */
415 static const u64 fix_mac[] = {
416 0x0060000000000000ULL, 0x0060600000000000ULL,
417 0x0040600000000000ULL, 0x0000600000000000ULL,
418 0x0020600000000000ULL, 0x0060600000000000ULL,
419 0x0020600000000000ULL, 0x0060600000000000ULL,
420 0x0020600000000000ULL, 0x0060600000000000ULL,
421 0x0020600000000000ULL, 0x0060600000000000ULL,
422 0x0020600000000000ULL, 0x0060600000000000ULL,
423 0x0020600000000000ULL, 0x0060600000000000ULL,
424 0x0020600000000000ULL, 0x0060600000000000ULL,
425 0x0020600000000000ULL, 0x0060600000000000ULL,
426 0x0020600000000000ULL, 0x0060600000000000ULL,
427 0x0020600000000000ULL, 0x0060600000000000ULL,
428 0x0020600000000000ULL, 0x0000600000000000ULL,
429 0x0040600000000000ULL, 0x0060600000000000ULL,
433 MODULE_LICENSE("GPL");
434 MODULE_VERSION(DRV_VERSION);
437 /* Module Loadable parameters. */
438 S2IO_PARM_INT(tx_fifo_num, 1);
439 S2IO_PARM_INT(rx_ring_num, 1);
442 S2IO_PARM_INT(rx_ring_mode, 1);
443 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
/* Pause-frame time value programmed into the RMAC (hardware units). */
444 S2IO_PARM_INT(rmac_pause_time, 0x100);
445 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
446 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
447 S2IO_PARM_INT(shared_splits, 0);
448 S2IO_PARM_INT(tmac_util_period, 5);
449 S2IO_PARM_INT(rmac_util_period, 5);
450 S2IO_PARM_INT(bimodal, 0);
451 S2IO_PARM_INT(l3l4hdr_size, 128);
452 /* Frequency of Rx desc syncs expressed as power of 2 */
453 S2IO_PARM_INT(rxsync_frequency, 3);
454 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
455 S2IO_PARM_INT(intr_type, 2);
456 /* Large receive offload feature */
457 S2IO_PARM_INT(lro, 0);
458 /* Max pkts to be aggregated by LRO at one time. If not specified,
459 * aggregation happens until we hit max IP pkt size(64K)
461 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
462 S2IO_PARM_INT(indicate_max_pkts, 0);
464 S2IO_PARM_INT(napi, 1);
465 S2IO_PARM_INT(ufo, 0);
466 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
/* Per-FIFO / per-ring array parameters; defaults below, overridable
 * from the command line via module_param_array. */
468 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
469 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
470 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
471 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
472 static unsigned int rts_frm_len[MAX_RX_RINGS] =
473 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
475 module_param_array(tx_fifo_len, uint, NULL, 0);
476 module_param_array(rx_ring_sz, uint, NULL, 0);
477 module_param_array(rts_frm_len, uint, NULL, 0);
481 * This table lists all the devices that this driver supports.
483 static struct pci_device_id s2io_tbl[] __devinitdata = {
484 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
485 PCI_ANY_ID, PCI_ANY_ID},
486 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
487 PCI_ANY_ID, PCI_ANY_ID},
488 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
489 PCI_ANY_ID, PCI_ANY_ID},
490 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
491 PCI_ANY_ID, PCI_ANY_ID},
495 MODULE_DEVICE_TABLE(pci, s2io_tbl);
/* PCI error-recovery callbacks (AER): detection, slot reset, resume. */
497 static struct pci_error_handlers s2io_err_handler = {
498 .error_detected = s2io_io_error_detected,
499 .slot_reset = s2io_io_slot_reset,
500 .resume = s2io_io_resume,
/* Driver registration structure; probe/remove are defined later in the
 * file (not visible in this view). */
503 static struct pci_driver s2io_driver = {
505 .id_table = s2io_tbl,
506 .probe = s2io_init_nic,
507 .remove = __devexit_p(s2io_rem_nic),
508 .err_handler = &s2io_err_handler,
/* Ceiling division: number of memory pages needed to hold @len TxD lists
 * when @per_each lists fit in one page.  Used by both init_shared_mem()
 * and free_shared_mem().  Arguments are fully parenthesized so that
 * expression arguments (e.g. a ternary) expand correctly. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
515 * init_shared_mem - Allocation and Initialization of Memory
516 * @nic: Device private variable.
517 * Description: The function allocates all the memory areas shared
518 * between the NIC and the driver. This includes Tx descriptors,
519 * Rx descriptors and the statistics block.
/* Allocates every DMA/driver memory area shared with the NIC: per-FIFO
 * TxD list pages and their bookkeeping, the UFO in-band buffer, per-ring
 * Rx blocks with interlinked next-pointers, the 2-buffer-mode aligned
 * buffer storage, and the statistics block.  Tracks every allocation in
 * mem_allocated and folds it into sw_stat on success.  On any failure,
 * the caller is expected to invoke free_shared_mem() to release whatever
 * was allocated so far.
 * NOTE(review): many lines (declarations, braces, returns, error paths)
 * are elided in this view of the file. */
522 static int init_shared_mem(struct s2io_nic *nic)
525 void *tmp_v_addr, *tmp_v_addr_next;
526 dma_addr_t tmp_p_addr, tmp_p_addr_next;
527 struct RxD_block *pre_rxd_blk = NULL;
529 int lst_size, lst_per_page;
530 struct net_device *dev = nic->dev;
534 struct mac_info *mac_control;
535 struct config_param *config;
536 unsigned long long mem_allocated = 0;
538 mac_control = &nic->mac_control;
539 config = &nic->config;
542 /* Allocation and initialization of TXDLs in FIFOs */
544 for (i = 0; i < config->tx_fifo_num; i++) {
545 size += config->tx_cfg[i].fifo_len;
/* Reject configurations whose total TxD count exceeds the hardware max. */
547 if (size > MAX_AVAILABLE_TXDS) {
548 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
549 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
/* lst_size: bytes per TxD list; lst_per_page: lists that fit per page. */
553 lst_size = (sizeof(struct TxD) * config->max_txds);
554 lst_per_page = PAGE_SIZE / lst_size;
556 for (i = 0; i < config->tx_fifo_num; i++) {
557 int fifo_len = config->tx_cfg[i].fifo_len;
558 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
559 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
561 if (!mac_control->fifos[i].list_info) {
563 "Malloc failed for list_info\n");
566 mem_allocated += list_holder_size;
567 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
/* Per-FIFO setup: put/get cursors, identity, and DMA pages for TxDLs. */
569 for (i = 0; i < config->tx_fifo_num; i++) {
570 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
572 mac_control->fifos[i].tx_curr_put_info.offset = 0;
573 mac_control->fifos[i].tx_curr_put_info.fifo_len =
574 config->tx_cfg[i].fifo_len - 1;
575 mac_control->fifos[i].tx_curr_get_info.offset = 0;
576 mac_control->fifos[i].tx_curr_get_info.fifo_len =
577 config->tx_cfg[i].fifo_len - 1;
578 mac_control->fifos[i].fifo_no = i;
579 mac_control->fifos[i].nic = nic;
580 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
582 for (j = 0; j < page_num; j++) {
586 tmp_v = pci_alloc_consistent(nic->pdev,
590 "pci_alloc_consistent ");
591 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
594 /* If we got a zero DMA address(can happen on
595 * certain platforms like PPC), reallocate.
596 * Store virtual address of page we don't want,
600 mac_control->zerodma_virt_addr = tmp_v;
602 "%s: Zero DMA address for TxDL. ", dev->name);
604 "Virtual address %p\n", tmp_v);
605 tmp_v = pci_alloc_consistent(nic->pdev,
609 "pci_alloc_consistent ");
610 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
613 mem_allocated += PAGE_SIZE;
/* Carve the page into lst_per_page TxD lists and record each one's
 * virtual and physical address in list_info. */
615 while (k < lst_per_page) {
616 int l = (j * lst_per_page) + k;
617 if (l == config->tx_cfg[i].fifo_len)
619 mac_control->fifos[i].list_info[l].list_virt_addr =
620 tmp_v + (k * lst_size);
621 mac_control->fifos[i].list_info[l].list_phy_addr =
622 tmp_p + (k * lst_size);
/* One u64 slot per TxD for UFO in-band data ('size' is the TxD total). */
628 nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
629 if (!nic->ufo_in_band_v)
631 mem_allocated += (size * sizeof(u64));
633 /* Allocation and initialization of RXDs in Rings */
635 for (i = 0; i < config->rx_ring_num; i++) {
/* Each block holds rxd_count RxDs plus one link RxD, so num_rxd must
 * be a multiple of (rxd_count + 1). */
636 if (config->rx_cfg[i].num_rxd %
637 (rxd_count[nic->rxd_mode] + 1)) {
638 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
639 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
641 DBG_PRINT(ERR_DBG, "RxDs per Block");
644 size += config->rx_cfg[i].num_rxd;
645 mac_control->rings[i].block_count =
646 config->rx_cfg[i].num_rxd /
647 (rxd_count[nic->rxd_mode] + 1 );
648 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
649 mac_control->rings[i].block_count;
651 if (nic->rxd_mode == RXD_MODE_1)
652 size = (size * (sizeof(struct RxD1)));
654 size = (size * (sizeof(struct RxD3)));
656 for (i = 0; i < config->rx_ring_num; i++) {
657 mac_control->rings[i].rx_curr_get_info.block_index = 0;
658 mac_control->rings[i].rx_curr_get_info.offset = 0;
659 mac_control->rings[i].rx_curr_get_info.ring_len =
660 config->rx_cfg[i].num_rxd - 1;
661 mac_control->rings[i].rx_curr_put_info.block_index = 0;
662 mac_control->rings[i].rx_curr_put_info.offset = 0;
663 mac_control->rings[i].rx_curr_put_info.ring_len =
664 config->rx_cfg[i].num_rxd - 1;
665 mac_control->rings[i].nic = nic;
666 mac_control->rings[i].ring_no = i;
668 blk_cnt = config->rx_cfg[i].num_rxd /
669 (rxd_count[nic->rxd_mode] + 1);
670 /* Allocating all the Rx blocks */
671 for (j = 0; j < blk_cnt; j++) {
672 struct rx_block_info *rx_blocks;
675 rx_blocks = &mac_control->rings[i].rx_blocks[j];
676 size = SIZE_OF_BLOCK; /* size is always page size */
677 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
679 if (tmp_v_addr == NULL) {
681 * In case of failure, free_shared_mem()
682 * is called, which should free any
683 * memory that was alloced till the
686 rx_blocks->block_virt_addr = tmp_v_addr;
689 mem_allocated += size;
690 memset(tmp_v_addr, 0, size);
691 rx_blocks->block_virt_addr = tmp_v_addr;
692 rx_blocks->block_dma_addr = tmp_p_addr;
693 rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
694 rxd_count[nic->rxd_mode],
696 if (!rx_blocks->rxds)
699 (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
/* Record per-RxD addresses at rxd_size byte strides within the block. */
700 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
701 rx_blocks->rxds[l].virt_addr =
702 rx_blocks->block_virt_addr +
703 (rxd_size[nic->rxd_mode] * l);
704 rx_blocks->rxds[l].dma_addr =
705 rx_blocks->block_dma_addr +
706 (rxd_size[nic->rxd_mode] * l);
709 /* Interlinking all Rx Blocks */
710 for (j = 0; j < blk_cnt; j++) {
712 mac_control->rings[i].rx_blocks[j].block_virt_addr;
/* Link each block to the next, wrapping the last back to block 0. */
714 mac_control->rings[i].rx_blocks[(j + 1) %
715 blk_cnt].block_virt_addr;
717 mac_control->rings[i].rx_blocks[j].block_dma_addr;
719 mac_control->rings[i].rx_blocks[(j + 1) %
720 blk_cnt].block_dma_addr;
722 pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
723 pre_rxd_blk->reserved_2_pNext_RxD_block =
724 (unsigned long) tmp_v_addr_next;
725 pre_rxd_blk->pNext_RxD_Blk_physical =
726 (u64) tmp_p_addr_next;
729 if (nic->rxd_mode == RXD_MODE_3B) {
731 * Allocation of Storages for buffer addresses in 2BUFF mode
732 * and the buffers as well.
734 for (i = 0; i < config->rx_ring_num; i++) {
735 blk_cnt = config->rx_cfg[i].num_rxd /
736 (rxd_count[nic->rxd_mode]+ 1);
737 mac_control->rings[i].ba =
738 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
740 if (!mac_control->rings[i].ba)
742 mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
743 for (j = 0; j < blk_cnt; j++) {
745 mac_control->rings[i].ba[j] =
746 kmalloc((sizeof(struct buffAdd) *
747 (rxd_count[nic->rxd_mode] + 1)),
749 if (!mac_control->rings[i].ba[j])
751 mem_allocated += (sizeof(struct buffAdd) * \
752 (rxd_count[nic->rxd_mode] + 1));
753 while (k != rxd_count[nic->rxd_mode]) {
754 ba = &mac_control->rings[i].ba[j][k];
/* Over-allocate by ALIGN_SIZE, then round the pointer up to an
 * ALIGN_SIZE boundary; the original (_org) pointer is kept for kfree. */
756 ba->ba_0_org = (void *) kmalloc
757 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
761 (BUF0_LEN + ALIGN_SIZE);
762 tmp = (unsigned long)ba->ba_0_org;
764 tmp &= ~((unsigned long) ALIGN_SIZE);
765 ba->ba_0 = (void *) tmp;
767 ba->ba_1_org = (void *) kmalloc
768 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
772 += (BUF1_LEN + ALIGN_SIZE);
773 tmp = (unsigned long) ba->ba_1_org;
775 tmp &= ~((unsigned long) ALIGN_SIZE);
776 ba->ba_1 = (void *) tmp;
783 /* Allocation and initialization of Statistics block */
784 size = sizeof(struct stat_block);
785 mac_control->stats_mem = pci_alloc_consistent
786 (nic->pdev, size, &mac_control->stats_mem_phy);
788 if (!mac_control->stats_mem) {
790 * In case of failure, free_shared_mem() is called, which
791 * should free any memory that was alloced till the
796 mem_allocated += size;
797 mac_control->stats_mem_sz = size;
799 tmp_v_addr = mac_control->stats_mem;
800 mac_control->stats_info = (struct stat_block *) tmp_v_addr;
801 memset(tmp_v_addr, 0, size);
802 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
803 (unsigned long long) tmp_p_addr);
/* Publish the running total into the freshly-zeroed stats block. */
804 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
809 * free_shared_mem - Free the allocated Memory
810 * @nic: Device private variable.
811 * Description: This function is to free all memory locations allocated by
812 * the init_shared_mem() function and return it to the kernel.
/* Releases everything init_shared_mem() allocated, in the same order:
 * TxDL pages and list_info (including the deliberately-kept zero-DMA
 * page), Rx blocks and their rxd_info arrays, the 2-buffer-mode aligned
 * buffers, the statistics block, and the UFO in-band buffer.  Safe to
 * call after a partial init: each step checks its pointer before
 * freeing.  Freed byte counts are accumulated into sw_stat.mem_freed.
 * NOTE(review): many lines (declarations, braces, continuations) are
 * elided in this view of the file. */
815 static void free_shared_mem(struct s2io_nic *nic)
817 int i, j, blk_cnt, size;
820 dma_addr_t tmp_p_addr;
821 struct mac_info *mac_control;
822 struct config_param *config;
823 int lst_size, lst_per_page;
824 struct net_device *dev;
832 mac_control = &nic->mac_control;
833 config = &nic->config;
/* Mirror init_shared_mem()'s TxDL page geometry. */
835 lst_size = (sizeof(struct TxD) * config->max_txds);
836 lst_per_page = PAGE_SIZE / lst_size;
838 for (i = 0; i < config->tx_fifo_num; i++) {
839 ufo_size += config->tx_cfg[i].fifo_len;
840 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
842 for (j = 0; j < page_num; j++) {
843 int mem_blks = (j * lst_per_page);
844 if (!mac_control->fifos[i].list_info)
846 if (!mac_control->fifos[i].list_info[mem_blks].
849 pci_free_consistent(nic->pdev, PAGE_SIZE,
850 mac_control->fifos[i].
853 mac_control->fifos[i].
856 nic->mac_control.stats_info->sw_stat.mem_freed
859 /* If we got a zero DMA address during allocation,
862 if (mac_control->zerodma_virt_addr) {
863 pci_free_consistent(nic->pdev, PAGE_SIZE,
864 mac_control->zerodma_virt_addr,
867 "%s: Freeing TxDL with zero DMA addr. ",
869 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
870 mac_control->zerodma_virt_addr);
871 nic->mac_control.stats_info->sw_stat.mem_freed
874 kfree(mac_control->fifos[i].list_info);
875 nic->mac_control.stats_info->sw_stat.mem_freed +=
876 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
/* Free the per-ring Rx blocks and their RxD address arrays. */
879 size = SIZE_OF_BLOCK;
880 for (i = 0; i < config->rx_ring_num; i++) {
881 blk_cnt = mac_control->rings[i].block_count;
882 for (j = 0; j < blk_cnt; j++) {
883 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
885 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
887 if (tmp_v_addr == NULL)
889 pci_free_consistent(nic->pdev, size,
890 tmp_v_addr, tmp_p_addr);
891 nic->mac_control.stats_info->sw_stat.mem_freed += size;
892 kfree(mac_control->rings[i].rx_blocks[j].rxds);
893 nic->mac_control.stats_info->sw_stat.mem_freed +=
894 ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
898 if (nic->rxd_mode == RXD_MODE_3B) {
899 /* Freeing buffer storage addresses in 2BUFF mode. */
900 for (i = 0; i < config->rx_ring_num; i++) {
901 blk_cnt = config->rx_cfg[i].num_rxd /
902 (rxd_count[nic->rxd_mode] + 1);
903 for (j = 0; j < blk_cnt; j++) {
905 if (!mac_control->rings[i].ba[j])
907 while (k != rxd_count[nic->rxd_mode]) {
909 &mac_control->rings[i].ba[j][k];
/* Free the original (unaligned) pointers kept in ba_0_org/ba_1_org. */
911 nic->mac_control.stats_info->sw_stat.\
912 mem_freed += (BUF0_LEN + ALIGN_SIZE);
914 nic->mac_control.stats_info->sw_stat.\
915 mem_freed += (BUF1_LEN + ALIGN_SIZE);
918 kfree(mac_control->rings[i].ba[j]);
919 nic->mac_control.stats_info->sw_stat.mem_freed +=
920 (sizeof(struct buffAdd) *
921 (rxd_count[nic->rxd_mode] + 1));
923 kfree(mac_control->rings[i].ba);
924 nic->mac_control.stats_info->sw_stat.mem_freed +=
925 (sizeof(struct buffAdd *) * blk_cnt);
929 if (mac_control->stats_mem) {
930 pci_free_consistent(nic->pdev,
931 mac_control->stats_mem_sz,
932 mac_control->stats_mem,
933 mac_control->stats_mem_phy);
934 nic->mac_control.stats_info->sw_stat.mem_freed +=
935 mac_control->stats_mem_sz;
937 if (nic->ufo_in_band_v) {
938 kfree(nic->ufo_in_band_v);
939 nic->mac_control.stats_info->sw_stat.mem_freed
940 += (ufo_size * sizeof(u64));
945 * s2io_verify_pci_mode -
/* Reads the adapter's pci_mode register and returns -1 when the mode is
 * reported as unknown; presumably returns the decoded mode on the elided
 * success path -- TODO confirm against the full file. */
948 static int s2io_verify_pci_mode(struct s2io_nic *nic)
950 struct XENA_dev_config __iomem *bar0 = nic->bar0;
951 register u64 val64 = 0;
954 val64 = readq(&bar0->pci_mode);
955 mode = (u8)GET_PCI_MODE(val64);
957 if ( val64 & PCI_MODE_UNKNOWN_MODE)
958 return -1; /* Unknown PCI mode */
962 #define NEC_VENID 0x1033
963 #define NEC_DEVID 0x0125
/* Detects whether the adapter sits behind a NEC PCI-E-to-PCI-X bridge
 * (vendor 0x1033, device 0x0125) by scanning all PCI devices and
 * comparing each match's bus against this device's parent bus.
 * NOTE(review): return statements and the pci_get_device reference
 * release are elided in this view. */
964 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
966 struct pci_dev *tdev = NULL;
967 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
968 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
969 if (tdev->bus == s2io_pdev->bus->parent)
/* Bus clock in MHz indexed by the decoded pci_mode value; PCI-X mode-2
 * entries reflect the doubled effective rate printed below. */
977 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
979 * s2io_print_pci_mode -
/* Decodes and logs the bus the adapter is plugged into (PCI/PCI-X mode,
 * width, clock), records the speed in config->bus_speed, and returns -1
 * for unknown/unsupported modes.  Devices behind the NEC bridge are
 * reported as PCI-E.  Switch statement and several lines are elided in
 * this view. */
981 static int s2io_print_pci_mode(struct s2io_nic *nic)
983 struct XENA_dev_config __iomem *bar0 = nic->bar0;
984 register u64 val64 = 0;
986 struct config_param *config = &nic->config;
988 val64 = readq(&bar0->pci_mode);
989 mode = (u8)GET_PCI_MODE(val64);
991 if ( val64 & PCI_MODE_UNKNOWN_MODE)
992 return -1; /* Unknown PCI mode */
994 config->bus_speed = bus_speed[mode];
996 if (s2io_on_nec_bridge(nic->pdev)) {
997 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1002 if (val64 & PCI_MODE_32_BITS) {
1003 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1005 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1009 case PCI_MODE_PCI_33:
1010 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1012 case PCI_MODE_PCI_66:
1013 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1015 case PCI_MODE_PCIX_M1_66:
1016 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1018 case PCI_MODE_PCIX_M1_100:
1019 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1021 case PCI_MODE_PCIX_M1_133:
1022 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1024 case PCI_MODE_PCIX_M2_66:
1025 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1027 case PCI_MODE_PCIX_M2_100:
1028 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1030 case PCI_MODE_PCIX_M2_133:
1031 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1034 return -1; /* Unsupported bus speed */
1041 * init_nic - Initialization of hardware
1042 * @nic: device private variable
1043 * Description: The function sequentially configures every block
1044 * of the H/W from their reset values.
1045 * Return Value: SUCCESS on success and
1046 * '-1' on failure (endian settings incorrect).
1049 static int init_nic(struct s2io_nic *nic)
1051 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1052 struct net_device *dev = nic->dev;
1053 register u64 val64 = 0;
1057 struct mac_info *mac_control;
1058 struct config_param *config;
1060 unsigned long long mem_share;
1063 mac_control = &nic->mac_control;
1064 config = &nic->config;
1066 /* to set the swapper controle on the card */
1067 if(s2io_set_swapper(nic)) {
1068 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1073 * Herc requires EOI to be removed from reset before XGXS, so..
1075 if (nic->device_type & XFRAME_II_DEVICE) {
1076 val64 = 0xA500000000ULL;
1077 writeq(val64, &bar0->sw_reset);
1079 val64 = readq(&bar0->sw_reset);
1082 /* Remove XGXS from reset state */
1084 writeq(val64, &bar0->sw_reset);
1086 val64 = readq(&bar0->sw_reset);
1088 /* Enable Receiving broadcasts */
1089 add = &bar0->mac_cfg;
1090 val64 = readq(&bar0->mac_cfg);
1091 val64 |= MAC_RMAC_BCAST_ENABLE;
1092 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1093 writel((u32) val64, add);
1094 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1095 writel((u32) (val64 >> 32), (add + 4));
1097 /* Read registers in all blocks */
1098 val64 = readq(&bar0->mac_int_mask);
1099 val64 = readq(&bar0->mc_int_mask);
1100 val64 = readq(&bar0->xgxs_int_mask);
1104 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1106 if (nic->device_type & XFRAME_II_DEVICE) {
1107 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1108 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1109 &bar0->dtx_control, UF);
1111 msleep(1); /* Necessary!! */
1115 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1116 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1117 &bar0->dtx_control, UF);
1118 val64 = readq(&bar0->dtx_control);
1123 /* Tx DMA Initialization */
1125 writeq(val64, &bar0->tx_fifo_partition_0);
1126 writeq(val64, &bar0->tx_fifo_partition_1);
1127 writeq(val64, &bar0->tx_fifo_partition_2);
1128 writeq(val64, &bar0->tx_fifo_partition_3);
1131 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1133 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1134 13) | vBIT(config->tx_cfg[i].fifo_priority,
1137 if (i == (config->tx_fifo_num - 1)) {
1144 writeq(val64, &bar0->tx_fifo_partition_0);
1148 writeq(val64, &bar0->tx_fifo_partition_1);
1152 writeq(val64, &bar0->tx_fifo_partition_2);
1156 writeq(val64, &bar0->tx_fifo_partition_3);
1162 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1163 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1165 if ((nic->device_type == XFRAME_I_DEVICE) &&
1166 (nic->pdev->revision < 4))
1167 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1169 val64 = readq(&bar0->tx_fifo_partition_0);
1170 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1171 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1174 * Initialization of Tx_PA_CONFIG register to ignore packet
1175 * integrity checking.
1177 val64 = readq(&bar0->tx_pa_cfg);
1178 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1179 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1180 writeq(val64, &bar0->tx_pa_cfg);
1182 /* Rx DMA intialization. */
1184 for (i = 0; i < config->rx_ring_num; i++) {
1186 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1189 writeq(val64, &bar0->rx_queue_priority);
1192 * Allocating equal share of memory to all the
1196 if (nic->device_type & XFRAME_II_DEVICE)
1201 for (i = 0; i < config->rx_ring_num; i++) {
1204 mem_share = (mem_size / config->rx_ring_num +
1205 mem_size % config->rx_ring_num);
1206 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1209 mem_share = (mem_size / config->rx_ring_num);
1210 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1213 mem_share = (mem_size / config->rx_ring_num);
1214 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1217 mem_share = (mem_size / config->rx_ring_num);
1218 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1221 mem_share = (mem_size / config->rx_ring_num);
1222 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1225 mem_share = (mem_size / config->rx_ring_num);
1226 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1229 mem_share = (mem_size / config->rx_ring_num);
1230 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1233 mem_share = (mem_size / config->rx_ring_num);
1234 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1238 writeq(val64, &bar0->rx_queue_cfg);
1241 * Filling Tx round robin registers
1242 * as per the number of FIFOs
1244 switch (config->tx_fifo_num) {
1246 val64 = 0x0000000000000000ULL;
1247 writeq(val64, &bar0->tx_w_round_robin_0);
1248 writeq(val64, &bar0->tx_w_round_robin_1);
1249 writeq(val64, &bar0->tx_w_round_robin_2);
1250 writeq(val64, &bar0->tx_w_round_robin_3);
1251 writeq(val64, &bar0->tx_w_round_robin_4);
1254 val64 = 0x0000010000010000ULL;
1255 writeq(val64, &bar0->tx_w_round_robin_0);
1256 val64 = 0x0100000100000100ULL;
1257 writeq(val64, &bar0->tx_w_round_robin_1);
1258 val64 = 0x0001000001000001ULL;
1259 writeq(val64, &bar0->tx_w_round_robin_2);
1260 val64 = 0x0000010000010000ULL;
1261 writeq(val64, &bar0->tx_w_round_robin_3);
1262 val64 = 0x0100000000000000ULL;
1263 writeq(val64, &bar0->tx_w_round_robin_4);
1266 val64 = 0x0001000102000001ULL;
1267 writeq(val64, &bar0->tx_w_round_robin_0);
1268 val64 = 0x0001020000010001ULL;
1269 writeq(val64, &bar0->tx_w_round_robin_1);
1270 val64 = 0x0200000100010200ULL;
1271 writeq(val64, &bar0->tx_w_round_robin_2);
1272 val64 = 0x0001000102000001ULL;
1273 writeq(val64, &bar0->tx_w_round_robin_3);
1274 val64 = 0x0001020000000000ULL;
1275 writeq(val64, &bar0->tx_w_round_robin_4);
1278 val64 = 0x0001020300010200ULL;
1279 writeq(val64, &bar0->tx_w_round_robin_0);
1280 val64 = 0x0100000102030001ULL;
1281 writeq(val64, &bar0->tx_w_round_robin_1);
1282 val64 = 0x0200010000010203ULL;
1283 writeq(val64, &bar0->tx_w_round_robin_2);
1284 val64 = 0x0001020001000001ULL;
1285 writeq(val64, &bar0->tx_w_round_robin_3);
1286 val64 = 0x0203000100000000ULL;
1287 writeq(val64, &bar0->tx_w_round_robin_4);
1290 val64 = 0x0001000203000102ULL;
1291 writeq(val64, &bar0->tx_w_round_robin_0);
1292 val64 = 0x0001020001030004ULL;
1293 writeq(val64, &bar0->tx_w_round_robin_1);
1294 val64 = 0x0001000203000102ULL;
1295 writeq(val64, &bar0->tx_w_round_robin_2);
1296 val64 = 0x0001020001030004ULL;
1297 writeq(val64, &bar0->tx_w_round_robin_3);
1298 val64 = 0x0001000000000000ULL;
1299 writeq(val64, &bar0->tx_w_round_robin_4);
1302 val64 = 0x0001020304000102ULL;
1303 writeq(val64, &bar0->tx_w_round_robin_0);
1304 val64 = 0x0304050001020001ULL;
1305 writeq(val64, &bar0->tx_w_round_robin_1);
1306 val64 = 0x0203000100000102ULL;
1307 writeq(val64, &bar0->tx_w_round_robin_2);
1308 val64 = 0x0304000102030405ULL;
1309 writeq(val64, &bar0->tx_w_round_robin_3);
1310 val64 = 0x0001000200000000ULL;
1311 writeq(val64, &bar0->tx_w_round_robin_4);
1314 val64 = 0x0001020001020300ULL;
1315 writeq(val64, &bar0->tx_w_round_robin_0);
1316 val64 = 0x0102030400010203ULL;
1317 writeq(val64, &bar0->tx_w_round_robin_1);
1318 val64 = 0x0405060001020001ULL;
1319 writeq(val64, &bar0->tx_w_round_robin_2);
1320 val64 = 0x0304050000010200ULL;
1321 writeq(val64, &bar0->tx_w_round_robin_3);
1322 val64 = 0x0102030000000000ULL;
1323 writeq(val64, &bar0->tx_w_round_robin_4);
1326 val64 = 0x0001020300040105ULL;
1327 writeq(val64, &bar0->tx_w_round_robin_0);
1328 val64 = 0x0200030106000204ULL;
1329 writeq(val64, &bar0->tx_w_round_robin_1);
1330 val64 = 0x0103000502010007ULL;
1331 writeq(val64, &bar0->tx_w_round_robin_2);
1332 val64 = 0x0304010002060500ULL;
1333 writeq(val64, &bar0->tx_w_round_robin_3);
1334 val64 = 0x0103020400000000ULL;
1335 writeq(val64, &bar0->tx_w_round_robin_4);
1339 /* Enable all configured Tx FIFO partitions */
1340 val64 = readq(&bar0->tx_fifo_partition_0);
1341 val64 |= (TX_FIFO_PARTITION_EN);
1342 writeq(val64, &bar0->tx_fifo_partition_0);
1344 /* Filling the Rx round robin registers as per the
1345 * number of Rings and steering based on QoS.
1347 switch (config->rx_ring_num) {
1349 val64 = 0x8080808080808080ULL;
1350 writeq(val64, &bar0->rts_qos_steering);
1353 val64 = 0x0000010000010000ULL;
1354 writeq(val64, &bar0->rx_w_round_robin_0);
1355 val64 = 0x0100000100000100ULL;
1356 writeq(val64, &bar0->rx_w_round_robin_1);
1357 val64 = 0x0001000001000001ULL;
1358 writeq(val64, &bar0->rx_w_round_robin_2);
1359 val64 = 0x0000010000010000ULL;
1360 writeq(val64, &bar0->rx_w_round_robin_3);
1361 val64 = 0x0100000000000000ULL;
1362 writeq(val64, &bar0->rx_w_round_robin_4);
1364 val64 = 0x8080808040404040ULL;
1365 writeq(val64, &bar0->rts_qos_steering);
1368 val64 = 0x0001000102000001ULL;
1369 writeq(val64, &bar0->rx_w_round_robin_0);
1370 val64 = 0x0001020000010001ULL;
1371 writeq(val64, &bar0->rx_w_round_robin_1);
1372 val64 = 0x0200000100010200ULL;
1373 writeq(val64, &bar0->rx_w_round_robin_2);
1374 val64 = 0x0001000102000001ULL;
1375 writeq(val64, &bar0->rx_w_round_robin_3);
1376 val64 = 0x0001020000000000ULL;
1377 writeq(val64, &bar0->rx_w_round_robin_4);
1379 val64 = 0x8080804040402020ULL;
1380 writeq(val64, &bar0->rts_qos_steering);
1383 val64 = 0x0001020300010200ULL;
1384 writeq(val64, &bar0->rx_w_round_robin_0);
1385 val64 = 0x0100000102030001ULL;
1386 writeq(val64, &bar0->rx_w_round_robin_1);
1387 val64 = 0x0200010000010203ULL;
1388 writeq(val64, &bar0->rx_w_round_robin_2);
1389 val64 = 0x0001020001000001ULL;
1390 writeq(val64, &bar0->rx_w_round_robin_3);
1391 val64 = 0x0203000100000000ULL;
1392 writeq(val64, &bar0->rx_w_round_robin_4);
1394 val64 = 0x8080404020201010ULL;
1395 writeq(val64, &bar0->rts_qos_steering);
1398 val64 = 0x0001000203000102ULL;
1399 writeq(val64, &bar0->rx_w_round_robin_0);
1400 val64 = 0x0001020001030004ULL;
1401 writeq(val64, &bar0->rx_w_round_robin_1);
1402 val64 = 0x0001000203000102ULL;
1403 writeq(val64, &bar0->rx_w_round_robin_2);
1404 val64 = 0x0001020001030004ULL;
1405 writeq(val64, &bar0->rx_w_round_robin_3);
1406 val64 = 0x0001000000000000ULL;
1407 writeq(val64, &bar0->rx_w_round_robin_4);
1409 val64 = 0x8080404020201008ULL;
1410 writeq(val64, &bar0->rts_qos_steering);
1413 val64 = 0x0001020304000102ULL;
1414 writeq(val64, &bar0->rx_w_round_robin_0);
1415 val64 = 0x0304050001020001ULL;
1416 writeq(val64, &bar0->rx_w_round_robin_1);
1417 val64 = 0x0203000100000102ULL;
1418 writeq(val64, &bar0->rx_w_round_robin_2);
1419 val64 = 0x0304000102030405ULL;
1420 writeq(val64, &bar0->rx_w_round_robin_3);
1421 val64 = 0x0001000200000000ULL;
1422 writeq(val64, &bar0->rx_w_round_robin_4);
1424 val64 = 0x8080404020100804ULL;
1425 writeq(val64, &bar0->rts_qos_steering);
1428 val64 = 0x0001020001020300ULL;
1429 writeq(val64, &bar0->rx_w_round_robin_0);
1430 val64 = 0x0102030400010203ULL;
1431 writeq(val64, &bar0->rx_w_round_robin_1);
1432 val64 = 0x0405060001020001ULL;
1433 writeq(val64, &bar0->rx_w_round_robin_2);
1434 val64 = 0x0304050000010200ULL;
1435 writeq(val64, &bar0->rx_w_round_robin_3);
1436 val64 = 0x0102030000000000ULL;
1437 writeq(val64, &bar0->rx_w_round_robin_4);
1439 val64 = 0x8080402010080402ULL;
1440 writeq(val64, &bar0->rts_qos_steering);
1443 val64 = 0x0001020300040105ULL;
1444 writeq(val64, &bar0->rx_w_round_robin_0);
1445 val64 = 0x0200030106000204ULL;
1446 writeq(val64, &bar0->rx_w_round_robin_1);
1447 val64 = 0x0103000502010007ULL;
1448 writeq(val64, &bar0->rx_w_round_robin_2);
1449 val64 = 0x0304010002060500ULL;
1450 writeq(val64, &bar0->rx_w_round_robin_3);
1451 val64 = 0x0103020400000000ULL;
1452 writeq(val64, &bar0->rx_w_round_robin_4);
1454 val64 = 0x8040201008040201ULL;
1455 writeq(val64, &bar0->rts_qos_steering);
1461 for (i = 0; i < 8; i++)
1462 writeq(val64, &bar0->rts_frm_len_n[i]);
1464 /* Set the default rts frame length for the rings configured */
1465 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1466 for (i = 0 ; i < config->rx_ring_num ; i++)
1467 writeq(val64, &bar0->rts_frm_len_n[i]);
1469 /* Set the frame length for the configured rings
1470 * desired by the user
1472 for (i = 0; i < config->rx_ring_num; i++) {
1473 /* If rts_frm_len[i] == 0 then it is assumed that user not
1474 * specified frame length steering.
1475 * If the user provides the frame length then program
1476 * the rts_frm_len register for those values or else
1477 * leave it as it is.
1479 if (rts_frm_len[i] != 0) {
1480 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1481 &bar0->rts_frm_len_n[i]);
1485 /* Disable differentiated services steering logic */
1486 for (i = 0; i < 64; i++) {
1487 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1488 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1490 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1495 /* Program statistics memory */
1496 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1498 if (nic->device_type == XFRAME_II_DEVICE) {
1499 val64 = STAT_BC(0x320);
1500 writeq(val64, &bar0->stat_byte_cnt);
1504 * Initializing the sampling rate for the device to calculate the
1505 * bandwidth utilization.
1507 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1508 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1509 writeq(val64, &bar0->mac_link_util);
1513 * Initializing the Transmit and Receive Traffic Interrupt
1517 * TTI Initialization. Default Tx timer gets us about
1518 * 250 interrupts per sec. Continuous interrupts are enabled
1521 if (nic->device_type == XFRAME_II_DEVICE) {
1522 int count = (nic->config.bus_speed * 125)/2;
1523 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1526 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1528 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1529 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1530 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1531 if (use_continuous_tx_intrs)
1532 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1533 writeq(val64, &bar0->tti_data1_mem);
1535 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1536 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1537 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1538 writeq(val64, &bar0->tti_data2_mem);
1540 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1541 writeq(val64, &bar0->tti_command_mem);
1544 * Once the operation completes, the Strobe bit of the command
1545 * register will be reset. We poll for this particular condition
1546 * We wait for a maximum of 500ms for the operation to complete,
1547 * if it's not complete by then we return error.
1551 val64 = readq(&bar0->tti_command_mem);
1552 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1556 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1564 if (nic->config.bimodal) {
1566 for (k = 0; k < config->rx_ring_num; k++) {
1567 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1568 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1569 writeq(val64, &bar0->tti_command_mem);
1572 * Once the operation completes, the Strobe bit of the command
1573 * register will be reset. We poll for this particular condition
1574 * We wait for a maximum of 500ms for the operation to complete,
1575 * if it's not complete by then we return error.
1579 val64 = readq(&bar0->tti_command_mem);
1580 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1585 "%s: TTI init Failed\n",
1595 /* RTI Initialization */
1596 if (nic->device_type == XFRAME_II_DEVICE) {
* Programmed to generate approx 500 Intrs per
1601 int count = (nic->config.bus_speed * 125)/4;
1602 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1604 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1606 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1607 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1608 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1610 writeq(val64, &bar0->rti_data1_mem);
1612 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1613 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1614 if (nic->intr_type == MSI_X)
1615 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1616 RTI_DATA2_MEM_RX_UFC_D(0x40));
1618 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1619 RTI_DATA2_MEM_RX_UFC_D(0x80));
1620 writeq(val64, &bar0->rti_data2_mem);
1622 for (i = 0; i < config->rx_ring_num; i++) {
1623 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1624 | RTI_CMD_MEM_OFFSET(i);
1625 writeq(val64, &bar0->rti_command_mem);
1628 * Once the operation completes, the Strobe bit of the
1629 * command register will be reset. We poll for this
1630 * particular condition. We wait for a maximum of 500ms
1631 * for the operation to complete, if it's not complete
1632 * by then we return error.
1636 val64 = readq(&bar0->rti_command_mem);
1637 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1641 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1652 * Initializing proper values as Pause threshold into all
1653 * the 8 Queues on Rx side.
1655 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1656 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1658 /* Disable RMAC PAD STRIPPING */
1659 add = &bar0->mac_cfg;
1660 val64 = readq(&bar0->mac_cfg);
1661 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1662 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1663 writel((u32) (val64), add);
1664 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1665 writel((u32) (val64 >> 32), (add + 4));
1666 val64 = readq(&bar0->mac_cfg);
1668 /* Enable FCS stripping by adapter */
1669 add = &bar0->mac_cfg;
1670 val64 = readq(&bar0->mac_cfg);
1671 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1672 if (nic->device_type == XFRAME_II_DEVICE)
1673 writeq(val64, &bar0->mac_cfg);
1675 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1676 writel((u32) (val64), add);
1677 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1678 writel((u32) (val64 >> 32), (add + 4));
1682 * Set the time value to be inserted in the pause frame
1683 * generated by xena.
1685 val64 = readq(&bar0->rmac_pause_cfg);
1686 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1687 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1688 writeq(val64, &bar0->rmac_pause_cfg);
1691 * Set the Threshold Limit for Generating the pause frame
1692 * If the amount of data in any Queue exceeds ratio of
1693 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1694 * pause frame is generated
1697 for (i = 0; i < 4; i++) {
1699 (((u64) 0xFF00 | nic->mac_control.
1700 mc_pause_threshold_q0q3)
1703 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1706 for (i = 0; i < 4; i++) {
1708 (((u64) 0xFF00 | nic->mac_control.
1709 mc_pause_threshold_q4q7)
1712 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1715 * TxDMA will stop Read request if the number of read split has
1716 * exceeded the limit pointed by shared_splits
1718 val64 = readq(&bar0->pic_control);
1719 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1720 writeq(val64, &bar0->pic_control);
1722 if (nic->config.bus_speed == 266) {
1723 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1724 writeq(0x0, &bar0->read_retry_delay);
1725 writeq(0x0, &bar0->write_retry_delay);
1729 * Programming the Herc to split every write transaction
1730 * that does not start on an ADB to reduce disconnects.
1732 if (nic->device_type == XFRAME_II_DEVICE) {
1733 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1734 MISC_LINK_STABILITY_PRD(3);
1735 writeq(val64, &bar0->misc_control);
1736 val64 = readq(&bar0->pic_control2);
1737 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1738 writeq(val64, &bar0->pic_control2);
1740 if (strstr(nic->product_name, "CX4")) {
1741 val64 = TMAC_AVG_IPG(0x17);
1742 writeq(val64, &bar0->tmac_avg_ipg);
/* Link-fault detection strategies returned by s2io_link_fault_indication() */
#define LINK_UP_DOWN_INTERRUPT 1
#define MAC_RMAC_ERR_TIMER 2

/*
 * s2io_link_fault_indication - select the link fault detection method
 * @nic: device private structure
 *
 * Returns LINK_UP_DOWN_INTERRUPT when link state changes can be reported
 * through an interrupt (Xframe II with legacy INTA), otherwise
 * MAC_RMAC_ERR_TIMER, meaning link state is detected from a timer path.
 */
static int s2io_link_fault_indication(struct s2io_nic *nic)
	/* With MSI/MSI-X the link up/down interrupt path is not used */
	if (nic->intr_type != INTA)
		return MAC_RMAC_ERR_TIMER;
	/* Xframe II (Hercules) can report link up/down via interrupt */
	if (nic->device_type == XFRAME_II_DEVICE)
		return LINK_UP_DOWN_INTERRUPT;
	return MAC_RMAC_ERR_TIMER;
1761 * do_s2io_write_bits - update alarm bits in alarm register
1762 * @value: alarm bits
1763 * @flag: interrupt status
1764 * @addr: address value
1765 * Description: update alarm bits in alarm register
static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
	/* Read-modify-write of a 64-bit alarm mask register */
	temp64 = readq(addr);

	/* ENABLE_INTRS clears the mask bits (unmasks the alarm sources) */
	if(flag == ENABLE_INTRS)
		temp64 &= ~((u64) value);
	/* otherwise the bits are set, masking the alarm sources */
		temp64 |= ((u64) value);
	writeq(temp64, addr);
/*
 * en_dis_err_alarms - enable or disable error/alarm interrupt sources
 * @nic: device private structure
 * @mask: bitmask of interrupt blocks (TX_DMA_INTR, TX_MAC_INTR, ...)
 * @flag: ENABLE_INTRS or DISABLE_INTRS, forwarded to do_s2io_write_bits()
 *
 * For each block selected in @mask, unmasks/masks that block's alarm
 * sources via do_s2io_write_bits() and accumulates the block's top-level
 * bit into gen_int_mask for the general interrupt mask register.
 */
void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;

	/* Tx DMA subsystem: PFC, TDA, PCC, TTI, LSO, TPA and SM alarms */
	if (mask & TX_DMA_INTR) {

		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				TXDMA_PCC_INT | TXDMA_TTI_INT |
				TXDMA_LSO_INT | TXDMA_TPA_INT |
				TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				&bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				PCC_N_SERR | PCC_6_COF_OV_ERR |
				PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);

	/* Tx MAC (TMAC) alarms */
	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				&bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				flag, &bar0->mac_tmac_err_mask);

	/* Tx XGXS (serdes) alarms */
	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				flag, &bar0->xgxs_txgxs_err_mask);

	/* Rx DMA subsystem: RC, PRC, RPA, RDA and RTI alarms */
	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				&bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				&bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
				flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				flag, &bar0->rti_err_mask);

	/* Rx MAC (RMAC) alarms, including link state change */
	if (mask & RX_MAC_INTR)
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				&bar0->mac_int_mask);
		do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				RMAC_DOUBLE_ECC_ERR |
				RMAC_LINK_STATE_CHANGE_INT,
				flag, &bar0->mac_rmac_err_mask);

	/* Rx XGXS (serdes) alarms */
	if (mask & RX_XGXS_INTR)
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				&bar0->xgxs_rxgxs_err_mask);

	/* Memory controller (MC) alarms: ECC errors and PLL lock loss */
	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				&bar0->mc_err_mask);
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
1905 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1906 * @nic: device private variable,
1907 * @mask: A mask indicating which Intr block must be modified and,
1908 * @flag: A flag indicating whether to enable or disable the Intrs.
1909 * Description: This function will either disable or enable the interrupts
1910 * depending on the flag argument. The mask argument can be used to
1911 * enable/disable any Intr block.
1912 * Return Value: NONE.
static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	/* Start from the alarm mask computed by en_dis_err_alarms() */
	intr_mask = nic->general_int_mask;

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/* Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 */
			if (s2io_link_fault_indication(nic) ==
					LINK_UP_DOWN_INTERRUPT ) {
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						&bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						&bar0->gpio_int_mask);
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);

	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);

	/* A cleared bit in general_int_mask enables that interrupt class */
	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64) intr_mask);
	/* otherwise mask all interrupt classes */
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	/* Cache the hardware's view of the mask for later restore paths */
	nic->general_int_mask = readq(&bar0->general_int_mask);
1995 * verify_pcc_quiescent- Checks for PCC quiescent state
1996 * Return: 1 If PCC is quiescence
1997 * 0 If PCC is not quiescence
static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);

	/* Herc (Xframe II) and Xena rev >= 4 report all PCC engines via
	 * RMAC_PCC_IDLE; older Xena only exposes RMAC_PCC_FOUR_IDLE.
	 * NOTE(review): (!herc && rev >= 4) || herc reduces to
	 * (herc || rev >= 4) - candidate cleanup.
	 */
	herc = (sp->device_type == XFRAME_II_DEVICE);

	if (flag == FALSE) {
		/* Adapter not enabled: the PCC idle bits must be clear-free */
		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
		/* Adapter enabled: the idle bits must read back fully set */
		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
					ADAPTER_STATUS_RMAC_PCC_IDLE))
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
					ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2030 * verify_xena_quiescence - Checks whether the H/W is ready
2031 * Description: Returns whether the H/W is ready to go or not. Depending
2032 * on whether adapter enable bit was written or not the comparison
2033 * differs and the calling function passes the input argument flag to
2035 * Return: 1 If xena is quiescence
2036 * 0 If Xena is not quiescence
static int verify_xena_quiescence(struct s2io_nic *sp)
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);

	/* Bus mode is needed below: the P_PLL is unused in PCI 33 mode */
	mode = s2io_verify_pci_mode(sp);

	/* Every subsystem must report ready/quiescent before the adapter
	 * may be enabled; log the first failing block and give up.
	 */
	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
		DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");

	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore,
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
		sp->device_type == XFRAME_II_DEVICE && mode !=
		DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
		ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2099 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
* @sp: Pointer to device specific structure
2102 * New procedure to clear mac address reading problems on Alpha platforms
static void fix_mac_address(struct s2io_nic * sp)
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Replay the canned fix_mac[] register sequence (terminated by
	 * END_SIGN) into gpio_control; each write is followed by a read
	 * back - presumably to flush the posted write, TODO confirm.
	 */
	while (fix_mac[i] != END_SIGN) {
		writeq(fix_mac[i++], &bar0->gpio_control);
		val64 = readq(&bar0->gpio_control);
2120 * start_nic - Turns the device on
2121 * @nic : device private variable.
2123 * This function actually turns the device on. Before this function is
2124 * called,all Registers are configured from their reset states
2125 * and shared memory is allocated but the NIC is still quiescent. On
2126 * calling this function, the device interrupts are cleared and the NIC is
2127 * literally switched on by writing into the adapter control register.
2129 * SUCCESS on success and -1 on failure.
static int start_nic(struct s2io_nic *nic)
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point the PRC at ring i's first Rx descriptor block */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		/* multi-buffer RxD modes additionally select ring mode 3 */
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);

	if (vlan_tag_strip == 0) {
		/* Module parameter requested no VLAN tag stripping */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): this clears ADAPTER_ECC_EN while the comment says
	 * "enabling" - confirm the bit polarity against the register spec.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Don't see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);

	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* magic LED programming value written past the mapped
		 * register struct (offset 0x2700) - hardware errata path
		 */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
2240 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
TxD *txdlp, int get_off)
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;

	/* A UFO in-band descriptor only has a dummy u64 buffer mapped */
	if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)
			txds->Buffer_Pointer, sizeof(u64),

	/* Host_Control of the descriptor carries the skb pointer */
	skb = (struct sk_buff *) ((unsigned long)
		txds->Host_Control);
	/* no skb attached: just wipe the whole descriptor list */
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));

	/* Unmap the linear part of the skb (len minus paged data) */
	pci_unmap_single(nic->pdev, (dma_addr_t)
		txds->Buffer_Pointer,
		skb->len - skb->data_len,

	/* Then unmap each paged fragment, one TxD per fragment */
	frg_cnt = skb_shinfo(skb)->nr_frags;
	for (j = 0; j < frg_cnt; j++, txds++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
		/* stop at the first unused descriptor */
		if (!txds->Buffer_Pointer)
		pci_unmap_page(nic->pdev, (dma_addr_t)
			txds->Buffer_Pointer,
			frag->size, PCI_DMA_TODEVICE);
	/* Clear the TxDL so it can be reused, then hand back the skb */
	memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2285 * free_tx_buffers - Free all queued Tx buffers
2286 * @nic : device private variable.
2288 * Free all queued Tx buffers.
2289 * Return Value: void
static void free_tx_buffers(struct s2io_nic *nic)
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Walk every Tx FIFO and every descriptor list in it */
	for (i = 0; i < config->tx_fifo_num; i++) {
		for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
			txdp = (struct TxD *) \
			    mac_control->fifos[i].list_info[j].list_virt_addr;
			/* Unmap the TxDL and recover any attached skb */
			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
			/* account the reclaimed skb memory in sw stats */
			nic->mac_control.stats_info->sw_stat.mem_freed
			"%s:forcibly freeing %d skbs on FIFO%d\n",
		/* Reset the FIFO's get/put indices to the empty state */
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
2326 * stop_nic - To stop the nic
* @nic : device private variable.
2329 * This function does exactly the opposite of what the start_nic()
2330 * function does. This function is called to stop the device.
static void stop_nic(struct s2io_nic *nic)
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Disable all interrupts */
	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
	interruptible |= TX_PIC_INTR;
	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);

	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~(ADAPTER_CNTL_EN);
	writeq(val64, &bar0->adapter_control);
2359 * fill_rx_buffers - Allocates the Rx side skbs
2360 * @nic: device private variable
2361 * @ring_no: ring number
2363 * The function allocates Rx side skbs and puts the physical
2364 * address of these buffers into the RxD buffer pointers, so that the NIC
2365 * can DMA the received frame into these locations.
2366 * The NIC supports 3 receive modes, viz
2368 * 2. three buffer and
2369 * 3. Five buffer modes.
2370 * Each mode defines how many fragments the received frame will be split
2371 * up into by the NIC. The frame is split into L3 header, L4 Header,
2372 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2373 * is split into 3 fragments. As of now only single buffer mode is
2376 * SUCCESS on success or an appropriate -ve value on failure.
/* Replenishes ring @ring_no with Rx skbs until it again holds pkt_cnt
 * buffers, DMA-mapping each skb and publishing the descriptors to the NIC
 * (RXD_OWN_XENA handover batched every 2^rxsync_frequency descriptors).
 * NOTE(review): the listing omits original lines (loop/branch closers,
 * some conditions); code is kept byte-identical to the visible lines. */
2379 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2381 struct net_device *dev = nic->dev;
2382 struct sk_buff *skb;
2384 int off, off1, size, block_no, block_no1;
2387 struct mac_info *mac_control;
2388 struct config_param *config;
2391 unsigned long flags;
2392 struct RxD_t *first_rxdp = NULL;
2393 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2396 struct swStat *stats = &nic->mac_control.stats_info->sw_stat;
2398 mac_control = &nic->mac_control;
2399 config = &nic->config;
/* Number of buffers still to allocate = ring capacity minus those present. */
2400 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2401 atomic_read(&nic->rx_bufs_left[ring_no]);
2403 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2404 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2405 while (alloc_tab < alloc_cnt) {
2406 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2408 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2410 rxdp = mac_control->rings[ring_no].
2411 rx_blocks[block_no].rxds[off].virt_addr;
/* Put index caught up with get index on an occupied RxD: ring is full. */
2413 if ((block_no == block_no1) && (off == off1) &&
2414 (rxdp->Host_Control)) {
2415 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2417 DBG_PRINT(INTR_DBG, " info equated\n");
/* Wrap the put pointer to the next block when the current one is full. */
2420 if (off && (off == rxd_count[nic->rxd_mode])) {
2421 mac_control->rings[ring_no].rx_curr_put_info.
2423 if (mac_control->rings[ring_no].rx_curr_put_info.
2424 block_index == mac_control->rings[ring_no].
2426 mac_control->rings[ring_no].rx_curr_put_info.
2428 block_no = mac_control->rings[ring_no].
2429 rx_curr_put_info.block_index;
2430 if (off == rxd_count[nic->rxd_mode])
2432 mac_control->rings[ring_no].rx_curr_put_info.
2434 rxdp = mac_control->rings[ring_no].
2435 rx_blocks[block_no].block_virt_addr;
2436 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
/* put_pos is also read from the Rx interrupt path; guard with put_lock
 * (the unlocked variant presumably covers a config without that race --
 * TODO confirm against the elided #ifdef). */
2440 spin_lock_irqsave(&nic->put_lock, flags);
2441 mac_control->rings[ring_no].put_pos =
2442 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2443 spin_unlock_irqrestore(&nic->put_lock, flags);
2445 mac_control->rings[ring_no].put_pos =
2446 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
/* Descriptor still owned by the NIC (3B mode marker set): stop here. */
2448 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2449 ((nic->rxd_mode == RXD_MODE_3B) &&
2450 (rxdp->Control_2 & BIT(0)))) {
2451 mac_control->rings[ring_no].rx_curr_put_info.
2455 /* calculate size of skb based on ring mode */
2456 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2457 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2458 if (nic->rxd_mode == RXD_MODE_1)
2459 size += NET_IP_ALIGN;
2461 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2464 skb = dev_alloc_skb(size);
2466 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
2467 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
/* On allocation failure, hand any batched descriptors to the NIC
 * before bailing out so already-filled RxDs are not stranded. */
2470 first_rxdp->Control_1 |= RXD_OWN_XENA;
2472 nic->mac_control.stats_info->sw_stat. \
2473 mem_alloc_fail_cnt++;
2476 nic->mac_control.stats_info->sw_stat.mem_allocated
2478 if (nic->rxd_mode == RXD_MODE_1) {
2479 /* 1 buffer mode - normal operation mode */
2480 rxdp1 = (struct RxD1*)rxdp;
2481 memset(rxdp, 0, sizeof(struct RxD1));
2482 skb_reserve(skb, NET_IP_ALIGN);
2483 rxdp1->Buffer0_ptr = pci_map_single
2484 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2485 PCI_DMA_FROMDEVICE);
2486 if( (rxdp1->Buffer0_ptr == 0) ||
2487 (rxdp1->Buffer0_ptr ==
2489 goto pci_map_failed;
2492 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2494 } else if (nic->rxd_mode == RXD_MODE_3B) {
2497 * 2 buffer mode provides 128
2498 * byte aligned receive buffers.
2501 rxdp3 = (struct RxD3*)rxdp;
2502 /* save buffer pointers to avoid frequent dma mapping */
2503 Buffer0_ptr = rxdp3->Buffer0_ptr;
2504 Buffer1_ptr = rxdp3->Buffer1_ptr;
2505 memset(rxdp, 0, sizeof(struct RxD3));
2506 /* restore the buffer pointers for dma sync*/
2507 rxdp3->Buffer0_ptr = Buffer0_ptr;
2508 rxdp3->Buffer1_ptr = Buffer1_ptr;
2510 ba = &mac_control->rings[ring_no].ba[block_no][off];
2511 skb_reserve(skb, BUF0_LEN);
2512 tmp = (u64)(unsigned long) skb->data;
2515 skb->data = (void *) (unsigned long)tmp;
2516 skb_reset_tail_pointer(skb);
/* Buffer0 (the ba_0 header area) is mapped once and then only synced. */
2518 if (!(rxdp3->Buffer0_ptr))
2519 rxdp3->Buffer0_ptr =
2520 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2521 PCI_DMA_FROMDEVICE);
2523 pci_dma_sync_single_for_device(nic->pdev,
2524 (dma_addr_t) rxdp3->Buffer0_ptr,
2525 BUF0_LEN, PCI_DMA_FROMDEVICE);
2526 if( (rxdp3->Buffer0_ptr == 0) ||
2527 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
2528 goto pci_map_failed;
2530 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2531 if (nic->rxd_mode == RXD_MODE_3B) {
2532 /* Two buffer mode */
2535 * Buffer2 will have L3/L4 header plus
2538 rxdp3->Buffer2_ptr = pci_map_single
2539 (nic->pdev, skb->data, dev->mtu + 4,
2540 PCI_DMA_FROMDEVICE);
2542 if( (rxdp3->Buffer2_ptr == 0) ||
2543 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
2544 goto pci_map_failed;
2546 rxdp3->Buffer1_ptr =
2547 pci_map_single(nic->pdev,
2549 PCI_DMA_FROMDEVICE);
/* Buffer1 mapping failed: undo the Buffer2 mapping before bailing. */
2550 if( (rxdp3->Buffer1_ptr == 0) ||
2551 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
2554 (dma_addr_t)rxdp3->Buffer2_ptr,
2556 PCI_DMA_FROMDEVICE);
2557 goto pci_map_failed;
2559 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2560 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2563 rxdp->Control_2 |= BIT(0);
2565 rxdp->Host_Control = (unsigned long) (skb);
/* Hand ownership to the NIC immediately except on batch boundaries;
 * the batch's first descriptor is released last (see below / 2592). */
2566 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2567 rxdp->Control_1 |= RXD_OWN_XENA;
2569 if (off == (rxd_count[nic->rxd_mode] + 1))
2571 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2573 rxdp->Control_2 |= SET_RXD_MARKER;
2574 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2577 first_rxdp->Control_1 |= RXD_OWN_XENA;
2581 atomic_inc(&nic->rx_bufs_left[ring_no]);
2586 /* Transfer ownership of first descriptor to adapter just before
2587 * exiting. Before that, use memory barrier so that ownership
2588 * and other fields are seen by adapter correctly.
2592 first_rxdp->Control_1 |= RXD_OWN_XENA;
/* pci_map_failed: account the failure and free the unmapped skb. */
2597 stats->pci_map_fail_cnt++;
2598 stats->mem_freed += skb->truesize;
2599 dev_kfree_skb_irq(skb);
/* Frees every skb attached to Rx block @blk of ring @ring_no, unmapping
 * the DMA buffers per rxd_mode and zeroing each descriptor.
 * NOTE(review): listing omits some original lines (skb NULL check,
 * Host_Control clear are presumably among them -- confirm in full source). */
2603 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2605 struct net_device *dev = sp->dev;
2607 struct sk_buff *skb;
2609 struct mac_info *mac_control;
2614 mac_control = &sp->mac_control;
2615 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2616 rxdp = mac_control->rings[ring_no].
2617 rx_blocks[blk].rxds[j].virt_addr;
2618 skb = (struct sk_buff *)
2619 ((unsigned long) rxdp->Host_Control);
2623 if (sp->rxd_mode == RXD_MODE_1) {
2624 rxdp1 = (struct RxD1*)rxdp;
/* Single-buffer mode: one mapping sized for MTU + L2 headers. */
2625 pci_unmap_single(sp->pdev, (dma_addr_t)
2628 HEADER_ETHERNET_II_802_3_SIZE
2629 + HEADER_802_2_SIZE +
2631 PCI_DMA_FROMDEVICE);
2632 memset(rxdp, 0, sizeof(struct RxD1));
2633 } else if(sp->rxd_mode == RXD_MODE_3B) {
2634 rxdp3 = (struct RxD3*)rxdp;
2635 ba = &mac_control->rings[ring_no].
/* Three-buffer mode: unmap Buffer0/1/2 separately. */
2637 pci_unmap_single(sp->pdev, (dma_addr_t)
2640 PCI_DMA_FROMDEVICE);
2641 pci_unmap_single(sp->pdev, (dma_addr_t)
2644 PCI_DMA_FROMDEVICE);
2645 pci_unmap_single(sp->pdev, (dma_addr_t)
2648 PCI_DMA_FROMDEVICE);
2649 memset(rxdp, 0, sizeof(struct RxD3));
2651 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2653 atomic_dec(&sp->rx_bufs_left[ring_no]);
2658 * free_rx_buffers - Frees all Rx buffers
2659 * @sp: device private variable.
2661 * This function will free all Rx buffers allocated by host.
/* Frees all host-allocated Rx buffers on every ring via free_rxd_blk(),
 * then resets each ring's get/put indices and rx_bufs_left counter. */
2666 static void free_rx_buffers(struct s2io_nic *sp)
2668 struct net_device *dev = sp->dev;
2669 int i, blk = 0, buf_cnt = 0;
2670 struct mac_info *mac_control;
2671 struct config_param *config;
2673 mac_control = &sp->mac_control;
2674 config = &sp->config;
2676 for (i = 0; i < config->rx_ring_num; i++) {
2677 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2678 free_rxd_blk(sp,i,blk);
2680 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2681 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2682 mac_control->rings[i].rx_curr_put_info.offset = 0;
2683 mac_control->rings[i].rx_curr_get_info.offset = 0;
2684 atomic_set(&sp->rx_bufs_left[i], 0);
/* NOTE(review): buf_cnt is never updated in the visible lines, so this
 * debug print always reports 0 -- confirm against the full source. */
2685 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2686 dev->name, buf_cnt, i);
2691 * s2io_poll - Rx interrupt handler for NAPI support
2692 * @napi : pointer to the napi structure.
2693 * @budget : The number of packets that were budgeted to be processed
2694 * during one pass through the 'Poll" function.
2696 * Comes into picture only if NAPI support has been incorporated. It does
2697 * the same thing that rx_intr_handler does, but not in an interrupt context
2698 * also It will process only a given number of packets.
2700 * 0 on success and 1 if there are No Rx packets to be processed.
/* NAPI poll: acks Rx traffic interrupts, runs rx_intr_handler() on every
 * ring within @budget, refills Rx buffers, and -- when the quota was not
 * exhausted -- completes NAPI and unmasks Rx interrupts.
 * NOTE(review): listing omits some lines (returns, budget-exhausted path);
 * code is kept byte-identical to the visible lines. */
2703 static int s2io_poll(struct napi_struct *napi, int budget)
2705 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2706 struct net_device *dev = nic->dev;
2707 int pkt_cnt = 0, org_pkts_to_process;
2708 struct mac_info *mac_control;
2709 struct config_param *config;
2710 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2713 atomic_inc(&nic->isr_cnt);
2714 mac_control = &nic->mac_control;
2715 config = &nic->config;
2717 nic->pkts_to_process = budget;
2718 org_pkts_to_process = nic->pkts_to_process;
/* Acknowledge (clear) pending Rx traffic interrupts; readl flushes PIO. */
2720 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2721 readl(&bar0->rx_traffic_int);
2723 for (i = 0; i < config->rx_ring_num; i++) {
2724 rx_intr_handler(&mac_control->rings[i]);
2725 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2726 if (!nic->pkts_to_process) {
2727 /* Quota for the current iteration has been met */
/* Budget not exhausted: leave polling mode and refill the rings. */
2732 netif_rx_complete(dev, napi);
2734 for (i = 0; i < config->rx_ring_num; i++) {
2735 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2736 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2737 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2741 /* Re enable the Rx interrupts. */
2742 writeq(0x0, &bar0->rx_traffic_mask);
2743 readl(&bar0->rx_traffic_mask);
2744 atomic_dec(&nic->isr_cnt);
/* Quota met: refill but stay in polling mode (no netif_rx_complete). */
2748 for (i = 0; i < config->rx_ring_num; i++) {
2749 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2750 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2751 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2755 atomic_dec(&nic->isr_cnt);
2759 #ifdef CONFIG_NET_POLL_CONTROLLER
2761 * s2io_netpoll - netpoll event handler entry point
2762 * @dev : pointer to the device structure.
2764 * This function will be called by upper layer to check for events on the
2765 * interface in situations where interrupts are disabled. It is used for
2766 * specific in-kernel networking tasks, such as remote consoles and kernel
2767 * debugging over the network (example netdump in RedHat).
/* Netpoll entry: with the device IRQ disabled, acks Tx/Rx interrupts and
 * runs the Tx and Rx handlers by hand so netconsole/netdump can make
 * progress without interrupts, then refills the Rx rings. */
2769 static void s2io_netpoll(struct net_device *dev)
2771 struct s2io_nic *nic = dev->priv;
2772 struct mac_info *mac_control;
2773 struct config_param *config;
2774 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2775 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
/* Skip entirely if the PCI channel is offline (e.g. EEH recovery). */
2778 if (pci_channel_offline(nic->pdev))
2781 disable_irq(dev->irq);
2783 atomic_inc(&nic->isr_cnt);
2784 mac_control = &nic->mac_control;
2785 config = &nic->config;
/* Ack all pending Rx and Tx traffic interrupts. */
2787 writeq(val64, &bar0->rx_traffic_int);
2788 writeq(val64, &bar0->tx_traffic_int);
2790 /* we need to free up the transmitted skbufs or else netpoll will
2791 * run out of skbs and will fail and eventually netpoll application such
2792 * as netdump will fail.
2794 for (i = 0; i < config->tx_fifo_num; i++)
2795 tx_intr_handler(&mac_control->fifos[i]);
2797 /* check for received packet and indicate up to network */
2798 for (i = 0; i < config->rx_ring_num; i++)
2799 rx_intr_handler(&mac_control->rings[i]);
2801 for (i = 0; i < config->rx_ring_num; i++) {
2802 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2803 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2804 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2808 atomic_dec(&nic->isr_cnt);
2809 enable_irq(dev->irq);
2815 * rx_intr_handler - Rx interrupt handler
2816 * @nic: device private variable.
2818 * If the interrupt is because of a received frame or if the
2819 * receive ring contains fresh as yet un-processed frames,this function is
2820 * called. It picks out the RxD at which place the last Rx processing had
2821 * stopped and sends the skb to the OSM's Rx handler and then increments
/* Walks the ring from the last-processed RxD, handing each up-to-date
 * descriptor's skb to rx_osm_handler() after unmapping/syncing its DMA
 * buffers, advancing get_info and wrapping across blocks.  Honors the
 * NAPI quota (pkts_to_process) and indicate_max_pkts, and flushes any
 * open LRO sessions before returning.
 * NOTE(review): listing omits some lines (loop closers, napi/lro guards);
 * code is kept byte-identical to the visible lines. */
2826 static void rx_intr_handler(struct ring_info *ring_data)
2828 struct s2io_nic *nic = ring_data->nic;
2829 struct net_device *dev = (struct net_device *) nic->dev;
2830 int get_block, put_block, put_offset;
2831 struct rx_curr_get_info get_info, put_info;
2833 struct sk_buff *skb;
2839 spin_lock(&nic->rx_lock);
/* Bail out early if the card is being reset. */
2840 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2841 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2842 __FUNCTION__, dev->name);
2843 spin_unlock(&nic->rx_lock);
2847 get_info = ring_data->rx_curr_get_info;
2848 get_block = get_info.block_index;
2849 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2850 put_block = put_info.block_index;
2851 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
/* put_pos read under put_lock (unlocked variant presumably for a config
 * without the refill race -- TODO confirm against the elided #ifdef). */
2853 spin_lock(&nic->put_lock);
2854 put_offset = ring_data->put_pos;
2855 spin_unlock(&nic->put_lock);
2857 put_offset = ring_data->put_pos;
2859 while (RXD_IS_UP2DT(rxdp)) {
2861 * If your are next to put index then it's
2862 * FIFO full condition
2864 if ((get_block == put_block) &&
2865 (get_info.offset + 1) == put_info.offset) {
2866 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2869 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
/* A NULL Host_Control here is a driver bug: log and abort processing. */
2871 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2873 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2874 spin_unlock(&nic->rx_lock);
2877 if (nic->rxd_mode == RXD_MODE_1) {
2878 rxdp1 = (struct RxD1*)rxdp;
2879 pci_unmap_single(nic->pdev, (dma_addr_t)
2882 HEADER_ETHERNET_II_802_3_SIZE +
2885 PCI_DMA_FROMDEVICE);
2886 } else if (nic->rxd_mode == RXD_MODE_3B) {
2887 rxdp3 = (struct RxD3*)rxdp;
/* Buffer0 is only synced (mapped once in fill_rx_buffers); the
 * payload buffer is fully unmapped. */
2888 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2890 BUF0_LEN, PCI_DMA_FROMDEVICE);
2891 pci_unmap_single(nic->pdev, (dma_addr_t)
2894 PCI_DMA_FROMDEVICE);
2896 prefetch(skb->data);
2897 rx_osm_handler(ring_data, rxdp);
2899 ring_data->rx_curr_get_info.offset = get_info.offset;
2900 rxdp = ring_data->rx_blocks[get_block].
2901 rxds[get_info.offset].virt_addr;
/* End of block: wrap offset and advance (possibly wrap) block index. */
2902 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2903 get_info.offset = 0;
2904 ring_data->rx_curr_get_info.offset = get_info.offset;
2906 if (get_block == ring_data->block_count)
2908 ring_data->rx_curr_get_info.block_index = get_block;
2909 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2912 nic->pkts_to_process -= 1;
2913 if ((napi) && (!nic->pkts_to_process))
2916 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2920 /* Clear all LRO sessions before exiting */
2921 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2922 struct lro *lro = &nic->lro0_n[i];
2924 update_L3L4_header(nic, lro);
2925 queue_rx_frame(lro->parent);
2926 clear_lro_session(lro);
2931 spin_unlock(&nic->rx_lock);
2935 * tx_intr_handler - Transmit interrupt handler
2936 * @nic : device private variable
2938 * If an interrupt was raised to indicate DMA complete of the
2939 * Tx packet, this function is called. It identifies the last TxD
2940 * whose buffer was freed and frees all skbs whose data have already
2941 * DMA'ed into the NICs internal memory.
/* Reclaims completed Tx descriptors: walks from tx_curr_get_info until it
 * meets the put index or a NIC-owned TxD, records per-T_CODE error stats,
 * frees each transmitted skb, then wakes the queue if it was stopped.
 * NOTE(review): listing omits some lines (switch cases, offset increment);
 * code is kept byte-identical to the visible lines. */
2946 static void tx_intr_handler(struct fifo_info *fifo_data)
2948 struct s2io_nic *nic = fifo_data->nic;
2949 struct net_device *dev = (struct net_device *) nic->dev;
2950 struct tx_curr_get_info get_info, put_info;
2951 struct sk_buff *skb;
2955 get_info = fifo_data->tx_curr_get_info;
2956 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2957 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
/* Stop at the put index, at a descriptor the NIC still owns, or at one
 * with no skb attached (Host_Control == 0). */
2959 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2960 (get_info.offset != put_info.offset) &&
2961 (txdlp->Host_Control)) {
2962 /* Check for TxD errors */
2963 if (txdlp->Control_1 & TXD_T_CODE) {
2964 unsigned long long err;
2965 err = txdlp->Control_1 & TXD_T_CODE;
2967 nic->mac_control.stats_info->sw_stat.
2971 /* update t_code statistics */
2972 err_mask = err >> 48;
2975 nic->mac_control.stats_info->sw_stat.
2980 nic->mac_control.stats_info->sw_stat.
2981 tx_desc_abort_cnt++;
2985 nic->mac_control.stats_info->sw_stat.
2986 tx_parity_err_cnt++;
2990 nic->mac_control.stats_info->sw_stat.
2995 nic->mac_control.stats_info->sw_stat.
2996 tx_list_proc_err_cnt++;
3001 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
/* NULL skb for a supposedly-completed TxD is a driver bug. */
3003 DBG_PRINT(ERR_DBG, "%s: Null skb ",
3005 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
3009 /* Updating the statistics block */
3010 nic->stats.tx_bytes += skb->len;
3011 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
3012 dev_kfree_skb_irq(skb);
/* Advance and wrap the get offset, then publish it. */
3015 if (get_info.offset == get_info.fifo_len + 1)
3016 get_info.offset = 0;
3017 txdlp = (struct TxD *) fifo_data->list_info
3018 [get_info.offset].list_virt_addr;
3019 fifo_data->tx_curr_get_info.offset =
/* Descriptors were reclaimed: restart a stopped Tx queue. */
3023 spin_lock(&nic->tx_lock);
3024 if (netif_queue_stopped(dev))
3025 netif_wake_queue(dev);
3026 spin_unlock(&nic->tx_lock);
3030 * s2io_mdio_write - Function to write in to MDIO registers
3031 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3032 * @addr : address value
3033 * @value : data value
3034 * @dev : pointer to net_device structure
3036 * This function is used to write values to the MDIO registers
/* Writes @value to MDIO register @addr of MMD @mmd_type via the
 * mdio_control register: an address transaction, then a write-data
 * transaction, then a read-back transaction; each is kicked off by a
 * second writeq with MDIO_CTRL_START_TRANS(0xE).
 * NOTE(review): listing omits some lines (val64 resets between phases
 * are presumably among them -- confirm in full source). */
3039 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3042 struct s2io_nic *sp = dev->priv;
3043 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* address transaction */
3046 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3047 | MDIO_MMD_DEV_ADDR(mmd_type)
3048 | MDIO_MMS_PRT_ADDR(0x0);
3049 writeq(val64, &bar0->mdio_control);
3050 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3051 writeq(val64, &bar0->mdio_control);
/* Data (write) transaction. */
3056 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3057 | MDIO_MMD_DEV_ADDR(mmd_type)
3058 | MDIO_MMS_PRT_ADDR(0x0)
3059 | MDIO_MDIO_DATA(value)
3060 | MDIO_OP(MDIO_OP_WRITE_TRANS);
3061 writeq(val64, &bar0->mdio_control);
3062 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3063 writeq(val64, &bar0->mdio_control);
/* Read-back transaction (result not consumed here). */
3067 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3068 | MDIO_MMD_DEV_ADDR(mmd_type)
3069 | MDIO_MMS_PRT_ADDR(0x0)
3070 | MDIO_OP(MDIO_OP_READ_TRANS);
3071 writeq(val64, &bar0->mdio_control);
3072 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3073 writeq(val64, &bar0->mdio_control);
3079 * s2io_mdio_read - Function to read from the MDIO registers
3080 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3081 * @addr : address value
3082 * @dev : pointer to net_device structure
3084 * This function is used to read values from the MDIO registers
/* Reads an MDIO register: address transaction, then a read transaction,
 * then extracts the 16-bit data field (bits 31:16 of mdio_control). */
3087 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3091 struct s2io_nic *sp = dev->priv;
3092 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3094 /* address transaction */
3095 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3096 | MDIO_MMD_DEV_ADDR(mmd_type)
3097 | MDIO_MMS_PRT_ADDR(0x0);
3098 writeq(val64, &bar0->mdio_control);
3099 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3100 writeq(val64, &bar0->mdio_control);
3103 /* Data transaction */
3105 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3106 | MDIO_MMD_DEV_ADDR(mmd_type)
3107 | MDIO_MMS_PRT_ADDR(0x0)
3108 | MDIO_OP(MDIO_OP_READ_TRANS);
3109 writeq(val64, &bar0->mdio_control);
3110 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3111 writeq(val64, &bar0->mdio_control);
3114 /* Read the value from regs */
3115 rval64 = readq(&bar0->mdio_control);
/* Mask then shift: returned value is the 16-bit field at bits 31:16. */
3116 rval64 = rval64 & 0xFFFF0000;
3117 rval64 = rval64 >> 16;
3121 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3122 * @counter : counter value to be updated
3123 * @flag : flag to indicate the status
3124 * @type : counter type
3126 * This function is to check the status of the xpak counters value
/* Updates an XPAK alarm counter and its 2-bit state field inside
 * *regs_stat (field selected by @index, i.e. shifted by index*2).
 * When @flag indicates an active alarm the counter is bumped and, on a
 * state transition, a per-@type warning is printed; otherwise the state
 * bits are cleared.
 * NOTE(review): listing omits lines (mask construction, branch guards);
 * code is kept byte-identical to the visible lines. */
3130 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3135 for(i = 0; i <index; i++)
3140 *counter = *counter + 1;
/* Extract this alarm's 2-bit state from the packed status word. */
3141 val64 = *regs_stat & mask;
3142 val64 = val64 >> (index * 0x2);
3149 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3150 "service. Excessive temperatures may "
3151 "result in premature transceiver "
3155 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3156 "service Excessive bias currents may "
3157 "indicate imminent laser diode "
3161 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3162 "service Excessive laser output "
3163 "power may saturate far-end "
3167 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
/* Write the updated 2-bit state back into *regs_stat. */
3172 val64 = val64 << (index * 0x2);
3173 *regs_stat = (*regs_stat & (~mask)) | (val64);
/* No alarm: clear this field's state bits. */
3176 *regs_stat = *regs_stat & (~mask);
3181 * s2io_updt_xpak_counter - Function to update the xpak counters
3182 * @dev : pointer to net_device struct
3184 * This function is to update the status of the xpak counters
/* Polls the XPAK transceiver over MDIO: verifies slave access and the
 * PMA ID, loads the DOM page, then reads the alarm and warning flag
 * registers and updates the xpak_stat counters bit by bit. */
3187 static void s2io_updt_xpak_counter(struct net_device *dev)
3195 struct s2io_nic *sp = dev->priv;
3196 struct stat_block *stat_info = sp->mac_control.stats_info;
3198 /* Check the communication with the MDIO slave */
3201 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* All-ones or all-zeros means the MDIO slave did not respond. */
3202 if((val64 == 0xFFFF) || (val64 == 0x0000))
3204 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3205 "Returned %llx\n", (unsigned long long)val64);
3209 /* Check for the expected value of 2040 at PMA address 0x0000 */
3212 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3213 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3214 (unsigned long long)val64);
3218 /* Loading the DOM register to MDIO register */
3220 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3221 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3223 /* Reading the Alarm flags */
3226 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* High-side alarms go through s2io_chk_xpak_counter (stateful 2-bit
 * tracking); low-side alarms are simple counters. */
3228 flag = CHECKBIT(val64, 0x7);
3230 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3231 &stat_info->xpak_stat.xpak_regs_stat,
3234 if(CHECKBIT(val64, 0x6))
3235 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3237 flag = CHECKBIT(val64, 0x3);
3239 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3240 &stat_info->xpak_stat.xpak_regs_stat,
3243 if(CHECKBIT(val64, 0x2))
3244 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3246 flag = CHECKBIT(val64, 0x1);
3248 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3249 &stat_info->xpak_stat.xpak_regs_stat,
3252 if(CHECKBIT(val64, 0x0))
3253 stat_info->xpak_stat.alarm_laser_output_power_low++;
3255 /* Reading the Warning flags */
3258 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3260 if(CHECKBIT(val64, 0x7))
3261 stat_info->xpak_stat.warn_transceiver_temp_high++;
3263 if(CHECKBIT(val64, 0x6))
3264 stat_info->xpak_stat.warn_transceiver_temp_low++;
3266 if(CHECKBIT(val64, 0x3))
3267 stat_info->xpak_stat.warn_laser_bias_current_high++;
3269 if(CHECKBIT(val64, 0x2))
3270 stat_info->xpak_stat.warn_laser_bias_current_low++;
3272 if(CHECKBIT(val64, 0x1))
3273 stat_info->xpak_stat.warn_laser_output_power_high++;
3275 if(CHECKBIT(val64, 0x0))
3276 stat_info->xpak_stat.warn_laser_output_power_low++;
3280 * wait_for_cmd_complete - waits for a command to complete.
3281 * @sp : private member of the device structure, which is a pointer to the
3282 * s2io_nic structure.
3283 * Description: Function that waits for a command to Write into RMAC
3284 * ADDR DATA registers to be completed and returns either success or
3285 * error depending on whether the command was complete or not.
3287 * SUCCESS on success and FAILURE on failure.
/* Polls *addr until @busy_bit reaches @bit_state (set or reset),
 * returning SUCCESS or FAILURE.  Validates bit_state first.
 * NOTE(review): only the head of this function is present in the listing
 * (the retry/delay loop and returns are elided). */
3290 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3293 int ret = FAILURE, cnt = 0, delay = 1;
3296 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3300 val64 = readq(addr);
3301 if (bit_state == S2IO_BIT_RESET) {
3302 if (!(val64 & busy_bit)) {
3307 if (!(val64 & busy_bit)) {
3324 * check_pci_device_id - Checks if the device id is supported
3326 * Description: Function to check if the pci device id is supported by driver.
3327 * Return value: Actual device id if supported else PCI_ANY_ID
/* Maps a PCI device id to the driver's device class: Herc ids ->
 * XFRAME_II_DEVICE, S2IO ids -> XFRAME_I_DEVICE; unsupported ids
 * presumably fall through to PCI_ANY_ID (default case elided in listing). */
3329 static u16 check_pci_device_id(u16 id)
3332 case PCI_DEVICE_ID_HERC_WIN:
3333 case PCI_DEVICE_ID_HERC_UNI:
3334 return XFRAME_II_DEVICE;
3335 case PCI_DEVICE_ID_S2IO_UNI:
3336 case PCI_DEVICE_ID_S2IO_WIN:
3337 return XFRAME_I_DEVICE;
3344 * s2io_reset - Resets the card.
3345 * @sp : private member of the device structure.
3346 * Description: Function to Reset the card. This function then also
3347 * restores the previously saved PCI configuration space registers as
3348 * the card reset also resets the configuration space.
/* Soft-resets the card (SW_RESET_ALL), restores PCI config space, swapper
 * and MSI-X state, clears post-reset parity/ECC status, resets the OS
 * statistics block while preserving link/reset/memory/watchdog counters,
 * applies the SXE-002 LED workaround, and restores the MAC address. */
3353 static void s2io_reset(struct s2io_nic * sp)
3355 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3360 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3361 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3363 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3364 __FUNCTION__, sp->dev->name);
3366 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3367 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3369 val64 = SW_RESET_ALL;
3370 writeq(val64, &bar0->sw_reset);
3371 if (strstr(sp->product_name, "CX4")) {
/* Retry config-space restore until the device id reads back sane. */
3375 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3377 /* Restore the PCI state saved during initialization. */
3378 pci_restore_state(sp->pdev);
3379 pci_read_config_word(sp->pdev, 0x2, &val16);
3380 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3385 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3386 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3389 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3393 /* Set swapper to enable I/O register access */
3394 s2io_set_swapper(sp);
3396 /* Restore the MSIX table entries from local variables */
3397 restore_xmsi_data(sp);
3399 /* Clear certain PCI/PCI-X fields after reset */
3400 if (sp->device_type == XFRAME_II_DEVICE) {
3401 /* Clear "detected parity error" bit */
3402 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3404 /* Clearing PCIX Ecc status register */
3405 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3407 /* Clearing PCI_STATUS error reflected here */
3408 writeq(BIT(62), &bar0->txpic_int_reg);
3411 /* Reset device statistics maintained by OS */
3412 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3414 up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3415 down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3416 up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3417 down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3418 reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3419 mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3420 mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3421 watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3422 /* save link up/down time/cnt, reset/memory/watchdog cnt */
3423 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3424 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3425 sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3426 sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3427 sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3428 sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3429 sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3430 sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3431 sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3432 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3434 /* SXE-002: Configure link and activity LED to turn it off */
3435 subid = sp->pdev->subsystem_device;
3436 if (((subid & 0xFF) >= 0x07) &&
3437 (sp->device_type == XFRAME_I_DEVICE)) {
3438 val64 = readq(&bar0->gpio_control);
3439 val64 |= 0x0000800000000000ULL;
3440 writeq(val64, &bar0->gpio_control);
3441 val64 = 0x0411040400000000ULL;
3442 writeq(val64, (void __iomem *)bar0 + 0x2700);
3446 * Clear spurious ECC interrupts that would have occured on
3447 * XFRAME II cards after reset.
3449 if (sp->device_type == XFRAME_II_DEVICE) {
/* Write-back of the read value clears the W1C error bits. */
3450 val64 = readq(&bar0->pcc_err_reg);
3451 writeq(val64, &bar0->pcc_err_reg);
3454 /* restore the previously assigned mac address */
3455 s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);
3457 sp->device_enabled_once = FALSE;
3461 * s2io_set_swapper - to set the swapper control on the card
3462 * @sp : private member of the device structure,
3463 * pointer to the s2io_nic structure.
3464 * Description: Function to set the swapper control on the card
3465 * correctly depending on the 'endianness' of the system.
3467 * SUCCESS on success and FAILURE on failure.
/* Programs swapper_ctrl for the host's endianness, verifying via the
 * pif_rd_swapper_fb magic (0x0123456789ABCDEF) and an xmsi_address
 * write/read-back; returns SUCCESS or FAILURE.
 * NOTE(review): listing omits some lines (loop headers, endian #ifdefs);
 * code is kept byte-identical to the visible lines. */
3470 static int s2io_set_swapper(struct s2io_nic * sp)
3472 struct net_device *dev = sp->dev;
3473 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3474 u64 val64, valt, valr;
3477 * Set proper endian settings and verify the same by reading
3478 * the PIF Feed-back register.
3481 val64 = readq(&bar0->pif_rd_swapper_fb);
3482 if (val64 != 0x0123456789ABCDEFULL) {
/* Try each read-swap candidate until the feedback magic matches. */
3484 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3485 0x8100008181000081ULL, /* FE=1, SE=0 */
3486 0x4200004242000042ULL, /* FE=0, SE=1 */
3487 0}; /* FE=0, SE=0 */
3490 writeq(value[i], &bar0->swapper_ctrl);
3491 val64 = readq(&bar0->pif_rd_swapper_fb);
3492 if (val64 == 0x0123456789ABCDEFULL)
3497 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3499 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3500 (unsigned long long) val64);
3505 valr = readq(&bar0->swapper_ctrl);
/* Verify the write path via an xmsi_address write/read-back. */
3508 valt = 0x0123456789ABCDEFULL;
3509 writeq(valt, &bar0->xmsi_address);
3510 val64 = readq(&bar0->xmsi_address);
3514 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3515 0x0081810000818100ULL, /* FE=1, SE=0 */
3516 0x0042420000424200ULL, /* FE=0, SE=1 */
3517 0}; /* FE=0, SE=0 */
3520 writeq((value[i] | valr), &bar0->swapper_ctrl);
3521 writeq(valt, &bar0->xmsi_address);
3522 val64 = readq(&bar0->xmsi_address);
3528 unsigned long long x = val64;
3529 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3530 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
/* Keep only the verified read-swap bits, then add the per-path flags. */
3534 val64 = readq(&bar0->swapper_ctrl);
3535 val64 &= 0xFFFF000000000000ULL;
3539 * The device by default set to a big endian format, so a
3540 * big endian driver need not set anything.
3542 val64 |= (SWAPPER_CTRL_TXP_FE |
3543 SWAPPER_CTRL_TXP_SE |
3544 SWAPPER_CTRL_TXD_R_FE |
3545 SWAPPER_CTRL_TXD_W_FE |
3546 SWAPPER_CTRL_TXF_R_FE |
3547 SWAPPER_CTRL_RXD_R_FE |
3548 SWAPPER_CTRL_RXD_W_FE |
3549 SWAPPER_CTRL_RXF_W_FE |
3550 SWAPPER_CTRL_XMSI_FE |
3551 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3552 if (sp->intr_type == INTA)
3553 val64 |= SWAPPER_CTRL_XMSI_SE;
3554 writeq(val64, &bar0->swapper_ctrl);
3557 * Initially we enable all bits to make it accessible by the
3558 * driver, then we selectively enable only those bits that
3561 val64 |= (SWAPPER_CTRL_TXP_FE |
3562 SWAPPER_CTRL_TXP_SE |
3563 SWAPPER_CTRL_TXD_R_FE |
3564 SWAPPER_CTRL_TXD_R_SE |
3565 SWAPPER_CTRL_TXD_W_FE |
3566 SWAPPER_CTRL_TXD_W_SE |
3567 SWAPPER_CTRL_TXF_R_FE |
3568 SWAPPER_CTRL_RXD_R_FE |
3569 SWAPPER_CTRL_RXD_R_SE |
3570 SWAPPER_CTRL_RXD_W_FE |
3571 SWAPPER_CTRL_RXD_W_SE |
3572 SWAPPER_CTRL_RXF_W_FE |
3573 SWAPPER_CTRL_XMSI_FE |
3574 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3575 if (sp->intr_type == INTA)
3576 val64 |= SWAPPER_CTRL_XMSI_SE;
3577 writeq(val64, &bar0->swapper_ctrl);
3579 val64 = readq(&bar0->swapper_ctrl);
3582 * Verifying if endian settings are accurate by reading a
3583 * feedback register.
3585 val64 = readq(&bar0->pif_rd_swapper_fb);
3586 if (val64 != 0x0123456789ABCDEFULL) {
3587 /* Endian settings are incorrect, calls for another dekko. */
3588 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3590 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3591 (unsigned long long) val64);
/* Polls xmsi_access until its busy bit (BIT(15)) clears; logs a failure
 * for XMSI entry @i if it never does.  Returns 0 on success.
 * NOTE(review): the retry loop body/return paths are elided in this listing. */
3598 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3600 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3602 int ret = 0, cnt = 0;
3605 val64 = readq(&bar0->xmsi_access);
3606 if (!(val64 & BIT(15)))
3612 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
/* Writes the saved MSI-X address/data pairs back into the device's XMSI
 * table (entry selected via xmsi_access), one entry at a time, waiting
 * for each transaction to complete. */
3619 static void restore_xmsi_data(struct s2io_nic *nic)
3621 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3625 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3626 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3627 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
/* BIT(7) = write direction, BIT(15) = start/busy, vBIT = entry index. */
3628 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3629 writeq(val64, &bar0->xmsi_access);
3630 if (wait_for_msix_trans(nic, i)) {
3631 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
/*
 * store_xmsi_data - read the MSI-X address/data pairs currently
 * programmed in the adapter's XMSI table and cache them in
 * nic->msix_info[] so restore_xmsi_data() can write them back later.
 */
3637 static void store_xmsi_data(struct s2io_nic *nic)
3639 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3640 u64 val64, addr, data;
3643 /* Store and display */
3644 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
/* BIT(15) starts the access; vBIT(i, 26, 6) selects vector i */
3645 val64 = (BIT(15) | vBIT(i, 26, 6));
3646 writeq(val64, &bar0->xmsi_access);
3647 if (wait_for_msix_trans(nic, i)) {
3648 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3651 addr = readq(&bar0->xmsi_address);
3652 data = readq(&bar0->xmsi_data);
3654 nic->msix_info[i].addr = addr;
3655 nic->msix_info[i].data = data;
/*
 * s2io_enable_msi_x - allocate the MSI-X bookkeeping tables, program the
 * Tx/Rx interrupt steering registers (tx_mat0_n / rx_mat) so each FIFO
 * and ring maps to its own vector (vector 0 is left for alarms; data
 * vectors start at msix_indx = 1), then enable MSI-X via the PCI core.
 * Return: 0-style success / error paths are partly elided in this excerpt.
 */
3660 static int s2io_enable_msi_x(struct s2io_nic *nic)
3662 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3664 u16 msi_control; /* Temp variable */
3665 int ret, i, j, msix_indx = 1;
/* per-vector table handed to pci_enable_msix() */
3667 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3669 if (nic->entries == NULL) {
3670 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3672 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3675 nic->mac_control.stats_info->sw_stat.mem_allocated
3676 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3677 memset(nic->entries, 0,MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
/* driver-side companion table: maps each vector to its fifo/ring */
3680 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3682 if (nic->s2io_entries == NULL) {
3683 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3685 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3686 kfree(nic->entries);
3687 nic->mac_control.stats_info->sw_stat.mem_freed
3688 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3691 nic->mac_control.stats_info->sw_stat.mem_allocated
3692 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3693 memset(nic->s2io_entries, 0,
3694 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3696 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3697 nic->entries[i].entry = i;
3698 nic->s2io_entries[i].entry = i;
3699 nic->s2io_entries[i].arg = NULL;
3700 nic->s2io_entries[i].in_use = 0;
/* steer each Tx FIFO's interrupt to its own vector */
3703 tx_mat = readq(&bar0->tx_mat0_n[0]);
3704 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3705 tx_mat |= TX_MAT_SET(i, msix_indx);
3706 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3707 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3708 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3710 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3712 if (!nic->config.bimodal) {
/* normal mode: Rx rings steered via rx_mat */
3713 rx_mat = readq(&bar0->rx_mat);
3714 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3715 rx_mat |= RX_MAT_SET(j, msix_indx);
3716 nic->s2io_entries[msix_indx].arg
3717 = &nic->mac_control.rings[j];
3718 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3719 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3721 writeq(rx_mat, &bar0->rx_mat);
/* bimodal mode: rings steered through tx_mat0_n[7] instead */
3723 tx_mat = readq(&bar0->tx_mat0_n[7]);
3724 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
/* NOTE(review): TX_MAT_SET(i, ...) inside a loop over j — i is the
 * leftover fifo count from the loop above, so this looks like it
 * should be TX_MAT_SET(j, msix_indx); confirm before changing. */
3725 tx_mat |= TX_MAT_SET(i, msix_indx);
3726 nic->s2io_entries[msix_indx].arg
3727 = &nic->mac_control.rings[j];
3728 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3729 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3731 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3734 nic->avail_msix_vectors = 0;
3735 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3736 /* We fail init if error or we get less vectors than min required */
/* positive ret = number of vectors the platform can grant; retry with
 * that count if it still covers all fifos + rings + 1 alarm vector */
3737 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3738 nic->avail_msix_vectors = ret;
3739 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
/* failure: release both tables and fall back (caller reverts to INTA) */
3742 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3743 kfree(nic->entries);
3744 nic->mac_control.stats_info->sw_stat.mem_freed
3745 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3746 kfree(nic->s2io_entries);
3747 nic->mac_control.stats_info->sw_stat.mem_freed
3748 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3749 nic->entries = NULL;
3750 nic->s2io_entries = NULL;
3751 nic->avail_msix_vectors = 0;
3754 if (!nic->avail_msix_vectors)
3755 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3758 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3759 * in the herc NIC. (Temp change, needs to be removed later)
3761 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3762 msi_control |= 0x1; /* Enable MSI */
3763 pci_write_config_word(nic->pdev, 0x42, msi_control);
3768 /* Handle software interrupt used during MSI(X) test */
/*
 * Minimal ISR for the interrupt self-test: records that the vector
 * fired and wakes the waiter sleeping in s2io_test_msi().
 */
3769 static irqreturn_t __devinit s2io_test_intr(int irq, void *dev_id)
3771 struct s2io_nic *sp = dev_id;
3773 sp->msi_detected = 1;
3774 wake_up(&sp->msi_wait);
3779 /* Test interrupt path by forcing a software IRQ */
/*
 * s2io_test_msi - verify MSI-X delivery before committing to it:
 * hook s2io_test_intr() on vector 1, program the scheduled-interrupt
 * timer to raise a one-shot interrupt routed to MSI 1, and wait up to
 * HZ/10 for the handler to set msi_detected.  On timeout the caller
 * (s2io_open) rolls back to INTA.  The original scheduled_int_ctrl
 * value is restored on exit.
 */
3780 static int __devinit s2io_test_msi(struct s2io_nic *sp)
3782 struct pci_dev *pdev = sp->pdev;
3783 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3787 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3790 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3791 sp->dev->name, pci_name(pdev), pdev->irq);
3795 init_waitqueue_head (&sp->msi_wait);
3796 sp->msi_detected = 0;
/* fire a one-shot timer interrupt routed to MSI vector 1 */
3798 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3799 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3800 val64 |= SCHED_INT_CTRL_TIMER_EN;
3801 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3802 writeq(val64, &bar0->scheduled_int_ctrl);
3804 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3806 if (!sp->msi_detected) {
3807 /* MSI(X) test failed, go back to INTx mode */
3808 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated"
3809 "using MSI(X) during test\n", sp->dev->name,
3815 free_irq(sp->entries[1].vector, sp);
3817 writeq(saved64, &bar0->scheduled_int_ctrl);
3821 /* ********************************************************* *
3822 * Functions defined below concern the OS part of the driver *
3823 * ********************************************************* */
3826 * s2io_open - open entry point of the driver
3827 * @dev : pointer to the device structure.
3829 * This function is the open entry point of the driver. It mainly calls a
3830 * function to allocate Rx buffers and inserts them into the buffer
3831 * descriptors and then enables the Rx part of the NIC.
3833 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * s2io_open - netdev open entry point.
 * Marks the carrier off, enables NAPI, sets up and self-tests MSI-X
 * (falling back to INTA — with a full MSI-X teardown — if either step
 * fails), brings the hardware up via s2io_card_up(), programs the MAC
 * address, and starts the Tx queue.  The error path below undoes the
 * NAPI enable and frees the MSI-X tables.
 */
3837 static int s2io_open(struct net_device *dev)
3839 struct s2io_nic *sp = dev->priv;
3843 * Make sure you have link off by default every time
3844 * Nic is initialized
3846 netif_carrier_off(dev);
3847 sp->last_link_state = 0;
3849 napi_enable(&sp->napi);
3851 if (sp->intr_type == MSI_X) {
3852 int ret = s2io_enable_msi_x(sp);
3857 ret = s2io_test_msi(sp);
3859 /* rollback MSI-X, will re-enable during add_isr() */
3861 sp->mac_control.stats_info->sw_stat.mem_freed +=
3862 (MAX_REQUESTED_MSI_X *
3863 sizeof(struct msix_entry));
3864 kfree(sp->s2io_entries);
3865 sp->mac_control.stats_info->sw_stat.mem_freed +=
3866 (MAX_REQUESTED_MSI_X *
3867 sizeof(struct s2io_msix_entry));
3869 sp->s2io_entries = NULL;
/* clear the MSI enable bit forced on in s2io_enable_msi_x() */
3871 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3872 msi_control &= 0xFFFE; /* Disable MSI */
3873 pci_write_config_word(sp->pdev, 0x42, msi_control);
3875 pci_disable_msix(sp->pdev);
3881 "%s: MSI-X requested but failed to enable\n",
3883 sp->intr_type = INTA;
3887 /* NAPI doesn't work well with MSI(X) */
3888 if (sp->intr_type != INTA) {
3890 sp->config.napi = 0;
3893 /* Initialize H/W and enable interrupts */
3894 err = s2io_card_up(sp);
3896 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3898 goto hw_init_failed;
3901 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3902 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3905 goto hw_init_failed;
3908 netif_start_queue(dev);
/* error unwind: disable NAPI and free any MSI-X tables we allocated */
3912 napi_disable(&sp->napi);
3913 if (sp->intr_type == MSI_X) {
3916 sp->mac_control.stats_info->sw_stat.mem_freed
3917 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3919 if (sp->s2io_entries) {
3920 kfree(sp->s2io_entries);
3921 sp->mac_control.stats_info->sw_stat.mem_freed
3922 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3929 * s2io_close -close entry point of the driver
3930 * @dev : device pointer.
3932 * This is the stop entry point of the driver. It needs to undo exactly
3933 * whatever was done by the open entry point,thus it's usually referred to
3934 * as the close function. Among other things this function mainly stops the
3935 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3937 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * s2io_close - netdev stop entry point: halt the Tx queue, disable
 * NAPI, then (in elided code) reset the card and free Tx/Rx buffers.
 */
3941 static int s2io_close(struct net_device *dev)
3943 struct s2io_nic *sp = dev->priv;
3945 netif_stop_queue(dev);
3946 napi_disable(&sp->napi);
3947 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3954 * s2io_xmit - Tx entry point of the driver
3955 * @skb : the socket buffer containing the Tx data.
3956 * @dev : device pointer.
3958 * This function is the Tx entry point of the driver. S2IO NIC supports
3959 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3960 * NOTE: when the device can't queue the pkt, just the trans_start variable will
3963 * 0 on success & 1 on failure.
/*
 * s2io_xmit - hard_start_xmit handler.
 * Picks a Tx FIFO (by VLAN priority when a tag is present), builds the
 * TxD list for the linear part plus page fragments, handles LSO/UFO and
 * checksum-offload flags, hands the list to the FIFO, and advances the
 * fifo's circular "put" pointer.  Runs under sp->tx_lock with IRQs off.
 */
3966 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3968 struct s2io_nic *sp = dev->priv;
3969 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3972 struct TxFIFO_element __iomem *tx_fifo;
3973 unsigned long flags;
3975 int vlan_priority = 0;
3976 struct mac_info *mac_control;
3977 struct config_param *config;
3979 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
3981 mac_control = &sp->mac_control;
3982 config = &sp->config;
3984 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3986 if (unlikely(skb->len <= 0)) {
3987 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3988 dev_kfree_skb_any(skb);
3992 spin_lock_irqsave(&sp->tx_lock, flags);
3993 if (atomic_read(&sp->card_state) == CARD_DOWN) {
3994 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3996 spin_unlock_irqrestore(&sp->tx_lock, flags);
4002 /* Get Fifo number to Transmit based on vlan priority */
4003 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
/* 802.1p priority lives in the top 3 bits of the VLAN tag */
4004 vlan_tag = vlan_tx_tag_get(skb);
4005 vlan_priority = vlan_tag >> 13;
4006 queue = config->fifo_mapping[vlan_priority];
4009 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
4010 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
4011 txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
4014 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
4015 /* Avoid "put" pointer going beyond "get" pointer */
/* a non-NULL Host_Control means this TxD still owns an skb in flight */
4016 if (txdp->Host_Control ||
4017 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4018 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4019 netif_stop_queue(dev);
4021 spin_unlock_irqrestore(&sp->tx_lock, flags);
4025 offload_type = s2io_offload_type(skb);
4026 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4027 txdp->Control_1 |= TXD_TCP_LSO_EN;
4028 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4030 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4032 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
4035 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4036 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4037 txdp->Control_2 |= config->tx_intr_type;
4039 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
4040 txdp->Control_2 |= TXD_VLAN_ENABLE;
4041 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
/* length of the linear (non-paged) part of the skb */
4044 frg_len = skb->len - skb->data_len;
4045 if (offload_type == SKB_GSO_UDP) {
/* UFO: TxD0 carries an 8-byte in-band header (the ip6 frag id),
 * mapped separately from the skb data below */
4048 ufo_size = s2io_udp_mss(skb);
4050 txdp->Control_1 |= TXD_UFO_EN;
4051 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4052 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4054 sp->ufo_in_band_v[put_off] =
4055 (u64)skb_shinfo(skb)->ip6_frag_id;
4057 sp->ufo_in_band_v[put_off] =
4058 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
4060 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
4061 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4063 sizeof(u64), PCI_DMA_TODEVICE);
4064 if((txdp->Buffer_Pointer == 0) ||
4065 (txdp->Buffer_Pointer == DMA_ERROR_CODE))
4066 goto pci_map_failed;
/* map the linear part of the skb for DMA */
4070 txdp->Buffer_Pointer = pci_map_single
4071 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4072 if((txdp->Buffer_Pointer == 0) ||
4073 (txdp->Buffer_Pointer == DMA_ERROR_CODE))
4074 goto pci_map_failed;
4076 txdp->Host_Control = (unsigned long) skb;
4077 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4078 if (offload_type == SKB_GSO_UDP)
4079 txdp->Control_1 |= TXD_UFO_EN;
4081 frg_cnt = skb_shinfo(skb)->nr_frags;
4082 /* For fragmented SKB. */
4083 for (i = 0; i < frg_cnt; i++) {
4084 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4085 /* A '0' length fragment will be ignored */
4089 txdp->Buffer_Pointer = (u64) pci_map_page
4090 (sp->pdev, frag->page, frag->page_offset,
4091 frag->size, PCI_DMA_TODEVICE);
4092 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4093 if (offload_type == SKB_GSO_UDP)
4094 txdp->Control_1 |= TXD_UFO_EN;
4096 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4098 if (offload_type == SKB_GSO_UDP)
4099 frg_cnt++; /* as Txd0 was used for inband header */
/* hand the finished TxD list to the hardware FIFO */
4101 tx_fifo = mac_control->tx_FIFO_start[queue];
4102 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
4103 writeq(val64, &tx_fifo->TxDL_Pointer);
4105 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4108 val64 |= TX_FIFO_SPECIAL_FUNC;
4110 writeq(val64, &tx_fifo->List_Control);
/* advance the circular put pointer, wrapping at fifo_len + 1 */
4115 if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
4117 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
4119 /* Avoid "put" pointer going beyond "get" pointer */
4120 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4121 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4123 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4125 netif_stop_queue(dev);
4127 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4128 dev->trans_start = jiffies;
4129 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* pci_map_failed: count the failure and park the queue */
4133 stats->pci_map_fail_cnt++;
4134 netif_stop_queue(dev);
4135 stats->mem_freed += skb->truesize;
4137 spin_unlock_irqrestore(&sp->tx_lock, flags);
/*
 * s2io_alarm_handle - alarm_timer callback: run the error handler and
 * re-arm itself to poll again in half a second.
 */
4142 s2io_alarm_handle(unsigned long data)
4144 struct s2io_nic *sp = (struct s2io_nic *)data;
4145 struct net_device *dev = sp->dev;
4147 s2io_handle_errors(dev);
4148 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
/*
 * s2io_chk_rx_buffers - top up Rx buffers for ring rng_n when the
 * free-buffer count drops.  At PANIC level (and when the refill tasklet
 * is not already running) the refill is done inline via
 * fill_rx_buffers(); at LOW level it is deferred to the tasklet.
 * TASKLET_IN_USE / the tasklet_status bit serialize against the tasklet.
 */
4151 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4153 int rxb_size, level;
4156 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4157 level = rx_buffer_level(sp, rxb_size, rng_n);
4159 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4161 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4162 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4163 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4164 DBG_PRINT(INFO_DBG, "Out of memory in %s",
4166 clear_bit(0, (&sp->tasklet_status));
4169 clear_bit(0, (&sp->tasklet_status));
4170 } else if (level == LOW)
4171 tasklet_schedule(&sp->task);
4173 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4174 DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
4175 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
/*
 * s2io_msix_ring_handle - per-ring MSI-X interrupt handler: process
 * received frames for this ring and replenish its Rx buffers.
 * isr_cnt brackets the handler so other paths can tell an ISR is live.
 */
4180 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4182 struct ring_info *ring = (struct ring_info *)dev_id;
4183 struct s2io_nic *sp = ring->nic;
4185 atomic_inc(&sp->isr_cnt);
4187 rx_intr_handler(ring);
4188 s2io_chk_rx_buffers(sp, ring->ring_no);
4190 atomic_dec(&sp->isr_cnt);
/*
 * s2io_msix_fifo_handle - per-FIFO MSI-X interrupt handler: reclaim
 * completed Tx descriptors for this FIFO.
 */
4194 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4196 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4197 struct s2io_nic *sp = fifo->nic;
4199 atomic_inc(&sp->isr_cnt);
4200 tx_intr_handler(fifo);
4201 atomic_dec(&sp->isr_cnt);
/*
 * s2io_txpic_intr_handle - service TXPIC/GPIO interrupts, which carry
 * the link-state change events.  On link-up it enables the adapter and
 * LED and masks further link-up interrupts; on link-down it reports the
 * loss and masks link-down interrupts — each direction re-arms the
 * opposite one so only the next transition fires.
 */
4204 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4206 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4209 val64 = readq(&bar0->pic_int_status);
4210 if (val64 & PIC_INT_GPIO) {
4211 val64 = readq(&bar0->gpio_int_reg);
4212 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4213 (val64 & GPIO_INT_REG_LINK_UP)) {
4215 * This is unstable state so clear both up/down
4216 * interrupt and adapter to re-evaluate the link state.
4218 val64 |= GPIO_INT_REG_LINK_DOWN;
4219 val64 |= GPIO_INT_REG_LINK_UP;
4220 writeq(val64, &bar0->gpio_int_reg);
4221 val64 = readq(&bar0->gpio_int_mask);
4222 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4223 GPIO_INT_MASK_LINK_DOWN);
4224 writeq(val64, &bar0->gpio_int_mask);
4226 else if (val64 & GPIO_INT_REG_LINK_UP) {
4227 val64 = readq(&bar0->adapter_status);
4228 /* Enable Adapter */
4229 val64 = readq(&bar0->adapter_control);
4230 val64 |= ADAPTER_CNTL_EN;
4231 writeq(val64, &bar0->adapter_control);
4232 val64 |= ADAPTER_LED_ON;
4233 writeq(val64, &bar0->adapter_control);
4234 if (!sp->device_enabled_once)
4235 sp->device_enabled_once = 1;
4237 s2io_link(sp, LINK_UP);
4239 * unmask link down interrupt and mask link-up
4242 val64 = readq(&bar0->gpio_int_mask);
4243 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4244 val64 |= GPIO_INT_MASK_LINK_UP;
4245 writeq(val64, &bar0->gpio_int_mask);
4247 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4248 val64 = readq(&bar0->adapter_status);
4249 s2io_link(sp, LINK_DOWN);
4250 /* Link is down so unmask link up interrupt */
4251 val64 = readq(&bar0->gpio_int_mask);
4252 val64 &= ~GPIO_INT_MASK_LINK_UP;
4253 val64 |= GPIO_INT_MASK_LINK_DOWN;
4254 writeq(val64, &bar0->gpio_int_mask);
/* turn the adapter LED off while the link is down */
4257 val64 = readq(&bar0->adapter_control);
4258 val64 = val64 &(~ADAPTER_LED_ON);
4259 writeq(val64, &bar0->adapter_control);
4262 val64 = readq(&bar0->gpio_int_mask);
4266 * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4267 * @value: alarm bits
4268 * @addr: address value
4269 * @cnt: counter variable
4270 * Description: Check for alarm and increment the counter
4272 * 1 - if alarm bit set
4273 * 0 - if alarm bit is not set
/*
 * do_s2io_chk_alarm_bit - test whether any of the alarm bits in @value
 * are set in the register at @addr; if so, write the value back
 * (this appears to acknowledge the alarm — write-1-to-clear semantics,
 * confirm against the Xframe register spec) and bump *cnt.
 * Return: 1 if an alarm bit was set, 0 otherwise (tail elided here).
 */
4275 int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4276 unsigned long long *cnt)
4279 val64 = readq(addr);
4280 if ( val64 & value ) {
4281 writeq(val64, addr);
4290 * s2io_handle_errors - Xframe error indication handler
4291 * @nic: device private variable
4292 * Description: Handle alarms such as loss of link, single or
4293 * double ECC errors, critical and serious errors.
/*
 * s2io_handle_errors - periodic alarm sweep (called from
 * s2io_alarm_handle every HZ/2): updates XPAK counters roughly hourly,
 * reacts to link-state-change interrupts, and walks every error-source
 * register in the Tx DMA, Rx DMA, MAC, XGXS and memory-controller
 * blocks via do_s2io_chk_alarm_bit().  A serious/double-ECC error ends
 * in the reset path at the bottom: stop the queue, schedule
 * rst_timer_task and count the soft reset.
 */
4297 static void s2io_handle_errors(void * dev_id)
4299 struct net_device *dev = (struct net_device *) dev_id;
4300 struct s2io_nic *sp = dev->priv;
4301 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4302 u64 temp64 = 0,val64=0;
4305 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4306 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4308 if (unlikely(atomic_read(&sp->card_state) == CARD_DOWN))
4311 if (pci_channel_offline(sp->pdev))
4314 memset(&sw_stat->ring_full_cnt, 0,
4315 sizeof(sw_stat->ring_full_cnt))
4317 /* Handling the XPAK counters update */
4318 if(stats->xpak_timer_count < 72000) {
4319 /* waiting for an hour */
4320 stats->xpak_timer_count++;
4322 s2io_updt_xpak_counter(dev);
4323 /* reset the count to zero */
4324 stats->xpak_timer_count = 0;
4327 /* Handling link status change error Intr */
4328 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4329 val64 = readq(&bar0->mac_rmac_err_reg);
4330 writeq(val64, &bar0->mac_rmac_err_reg);
4331 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4332 schedule_work(&sp->set_link_task);
4335 /* In case of a serious error, the device will be Reset. */
4336 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4337 &sw_stat->serious_err_cnt))
4340 /* Check for data parity error */
4341 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4342 &sw_stat->parity_err_cnt))
4345 /* Check for ring full counter */
/* ring_bump_counter1/2 pack eight 16-bit per-ring counters */
4346 if (sp->device_type == XFRAME_II_DEVICE) {
4347 val64 = readq(&bar0->ring_bump_counter1);
4348 for (i=0; i<4; i++) {
4349 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4350 temp64 >>= 64 - ((i+1)*16);
4351 sw_stat->ring_full_cnt[i] += temp64;
4354 val64 = readq(&bar0->ring_bump_counter2);
4355 for (i=0; i<4; i++) {
4356 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4357 temp64 >>= 64 - ((i+1)*16);
4358 sw_stat->ring_full_cnt[i+4] += temp64;
4362 val64 = readq(&bar0->txdma_int_status);
4363 /*check for pfc_err*/
4364 if (val64 & TXDMA_PFC_INT) {
4365 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4366 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4367 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4368 &sw_stat->pfc_err_cnt))
4370 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4371 &sw_stat->pfc_err_cnt);
4374 /*check for tda_err*/
4375 if (val64 & TXDMA_TDA_INT) {
4376 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4377 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4378 &sw_stat->tda_err_cnt))
4380 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4381 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4383 /*check for pcc_err*/
4384 if (val64 & TXDMA_PCC_INT) {
4385 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4386 | PCC_N_SERR | PCC_6_COF_OV_ERR
4387 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4388 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4389 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4390 &sw_stat->pcc_err_cnt))
4392 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4393 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4396 /*check for tti_err*/
4397 if (val64 & TXDMA_TTI_INT) {
4398 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4399 &sw_stat->tti_err_cnt))
4401 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4402 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4405 /*check for lso_err*/
4406 if (val64 & TXDMA_LSO_INT) {
4407 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4408 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4409 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4411 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4412 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4415 /*check for tpa_err*/
4416 if (val64 & TXDMA_TPA_INT) {
4417 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4418 &sw_stat->tpa_err_cnt))
4420 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4421 &sw_stat->tpa_err_cnt);
4424 /*check for sm_err*/
4425 if (val64 & TXDMA_SM_INT) {
4426 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4427 &sw_stat->sm_err_cnt))
4431 val64 = readq(&bar0->mac_int_status);
4432 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4433 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4434 &bar0->mac_tmac_err_reg,
4435 &sw_stat->mac_tmac_err_cnt))
4437 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4438 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4439 &bar0->mac_tmac_err_reg,
4440 &sw_stat->mac_tmac_err_cnt);
4443 val64 = readq(&bar0->xgxs_int_status);
4444 if (val64 & XGXS_INT_STATUS_TXGXS) {
4445 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4446 &bar0->xgxs_txgxs_err_reg,
4447 &sw_stat->xgxs_txgxs_err_cnt))
4449 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4450 &bar0->xgxs_txgxs_err_reg,
4451 &sw_stat->xgxs_txgxs_err_cnt);
4454 val64 = readq(&bar0->rxdma_int_status);
4455 if (val64 & RXDMA_INT_RC_INT_M) {
4456 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4457 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4458 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4460 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4461 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4462 &sw_stat->rc_err_cnt);
4463 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4464 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4465 &sw_stat->prc_pcix_err_cnt))
4467 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4468 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4469 &sw_stat->prc_pcix_err_cnt);
4472 if (val64 & RXDMA_INT_RPA_INT_M) {
4473 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4474 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4476 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4477 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4480 if (val64 & RXDMA_INT_RDA_INT_M) {
4481 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4482 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4483 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4484 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4486 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4487 | RDA_MISC_ERR | RDA_PCIX_ERR,
4488 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4491 if (val64 & RXDMA_INT_RTI_INT_M) {
4492 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4493 &sw_stat->rti_err_cnt))
4495 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4496 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4499 val64 = readq(&bar0->mac_int_status);
4500 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4501 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4502 &bar0->mac_rmac_err_reg,
4503 &sw_stat->mac_rmac_err_cnt))
4505 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4506 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4507 &sw_stat->mac_rmac_err_cnt);
4510 val64 = readq(&bar0->xgxs_int_status);
4511 if (val64 & XGXS_INT_STATUS_RXGXS) {
4512 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4513 &bar0->xgxs_rxgxs_err_reg,
4514 &sw_stat->xgxs_rxgxs_err_cnt))
4518 val64 = readq(&bar0->mc_int_status);
4519 if(val64 & MC_INT_STATUS_MC_INT) {
4520 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4521 &sw_stat->mc_err_cnt))
4524 /* Handling Ecc errors */
4525 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4526 writeq(val64, &bar0->mc_err_reg);
4527 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4528 sw_stat->double_ecc_errs++;
4529 if (sp->device_type != XFRAME_II_DEVICE) {
4531 * Reset XframeI only if critical error
4534 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4535 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4539 sw_stat->single_ecc_errs++;
/* reset path: stop Tx and hand the reset off to rst_timer_task */
4545 netif_stop_queue(dev);
4546 schedule_work(&sp->rst_timer_task);
4547 sw_stat->soft_reset_cnt++;
4552 * s2io_isr - ISR handler of the device .
4553 * @irq: the irq of the device.
4554 * @dev_id: a void pointer to the dev structure of the NIC.
4555 * Description: This function is the ISR handler of the device. It
4556 * identifies the reason for the interrupt and calls the relevant
4557 * service routines. As a contingency measure, this ISR allocates the
4558 * recv buffers, if their numbers are below the panic value which is
4559 * presently set to 25% of the original number of rcv buffers allocated.
4561 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4562 * IRQ_NONE: will be returned if interrupt is not from our device
/*
 * s2io_isr - shared (INTA) interrupt handler: reads general_int_status
 * to find the cause, defers Rx to NAPI when it is enabled (otherwise
 * calls rx_intr_handler() directly per ring), reclaims Tx descriptors
 * per FIFO, forwards TXPIC events, and tops up Rx buffers.  Returns
 * IRQ_NONE when the status register shows the interrupt was not ours.
 */
4564 static irqreturn_t s2io_isr(int irq, void *dev_id)
4566 struct net_device *dev = (struct net_device *) dev_id;
4567 struct s2io_nic *sp = dev->priv;
4568 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4571 struct mac_info *mac_control;
4572 struct config_param *config;
4574 /* Pretend we handled any irq's from a disconnected card */
4575 if (pci_channel_offline(sp->pdev))
4578 atomic_inc(&sp->isr_cnt);
4579 mac_control = &sp->mac_control;
4580 config = &sp->config;
4583 * Identify the cause for interrupt and call the appropriate
4584 * interrupt handler. Causes for the interrupt could be;
4588 * 4. Error in any functional blocks of the NIC.
4590 reason = readq(&bar0->general_int_status);
4593 /* The interrupt was not raised by us. */
4594 atomic_dec(&sp->isr_cnt);
/* all-ones read means the device has dropped off the bus */
4597 else if (unlikely(reason == S2IO_MINUS_ONE) ) {
4598 /* Disable device and get out */
4599 atomic_dec(&sp->isr_cnt);
/* NAPI path: mask Rx interrupts and let the poll routine drain */
4604 if (reason & GEN_INTR_RXTRAFFIC) {
4605 if (likely (netif_rx_schedule_prep(dev, &sp->napi))) {
4606 __netif_rx_schedule(dev, &sp->napi);
4607 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4610 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4614 * Rx handler is called by default, without checking for the
4615 * cause of interrupt.
4616 * rx_traffic_int reg is an R1 register, writing all 1's
4617 * will ensure that the actual interrupt causing bit gets
4618 * cleared and hence a read can be avoided.
4620 if (reason & GEN_INTR_RXTRAFFIC)
4621 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4623 for (i = 0; i < config->rx_ring_num; i++) {
4624 rx_intr_handler(&mac_control->rings[i]);
4629 * tx_traffic_int reg is an R1 register, writing all 1's
4630 * will ensure that the actual interrupt causing bit gets
4631 * cleared and hence a read can be avoided.
4633 if (reason & GEN_INTR_TXTRAFFIC)
4634 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4636 for (i = 0; i < config->tx_fifo_num; i++)
4637 tx_intr_handler(&mac_control->fifos[i]);
4639 if (reason & GEN_INTR_TXPIC)
4640 s2io_txpic_intr_handle(sp);
4642 * If the Rx buffer count is below the panic threshold then
4643 * reallocate the buffers from the interrupt handler itself,
4644 * else schedule a tasklet to reallocate the buffers.
4647 for (i = 0; i < config->rx_ring_num; i++)
4648 s2io_chk_rx_buffers(sp, i);
4651 writeq(0, &bar0->general_int_mask);
/* flush the mask write before leaving the handler */
4652 readl(&bar0->general_int_status);
4654 atomic_dec(&sp->isr_cnt);
/*
 * s2io_updt_stats - trigger a one-shot hardware statistics DMA into the
 * stats block, then poll stat_cfg until BIT(0) clears (the poll/timeout
 * tail is elided in this excerpt).  Only runs while the card is up.
 */
4661 static void s2io_updt_stats(struct s2io_nic *sp)
4663 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4667 if (atomic_read(&sp->card_state) == CARD_UP) {
4668 /* Apprx 30us on a 133 MHz bus */
4669 val64 = SET_UPDT_CLICKS(10) |
4670 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4671 writeq(val64, &bar0->stat_cfg);
4674 val64 = readq(&bar0->stat_cfg);
4675 if (!(val64 & BIT(0)))
4679 break; /* Updt failed */
4685 * s2io_get_stats - Updates the device statistics structure.
4686 * @dev : pointer to the device structure.
4688 * This function updates the device statistics structure in the s2io_nic
4689 * structure and returns a pointer to the same.
4691 * pointer to the updated net_device_stats structure.
/*
 * s2io_get_stats - netdev get_stats hook: force an immediate hardware
 * statistics update, copy the little-endian counters from the DMA'd
 * stats block into sp->stats, and return it.
 */
4694 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4696 struct s2io_nic *sp = dev->priv;
4697 struct mac_info *mac_control;
4698 struct config_param *config;
4701 mac_control = &sp->mac_control;
4702 config = &sp->config;
4704 /* Configure Stats for immediate updt */
4705 s2io_updt_stats(sp);
/* hardware stats block is little-endian; convert as we copy */
4707 sp->stats.tx_packets =
4708 le32_to_cpu(mac_control->stats_info->tmac_frms);
4709 sp->stats.tx_errors =
4710 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4711 sp->stats.rx_errors =
4712 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4713 sp->stats.multicast =
4714 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4715 sp->stats.rx_length_errors =
4716 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4718 return (&sp->stats);
4722 * s2io_set_multicast - entry point for multicast address enable/disable.
4723 * @dev : pointer to the device structure
4725 * This function is a driver entry point which gets called by the kernel
4726 * whenever multicast addresses must be enabled/disabled. This also gets
4727 * called to set/reset promiscuous mode. Depending on the device flag, we
4728 * determine, if multicast address must be enabled or if promiscuous mode
4729 * is to be disabled etc.
/*
 * s2io_set_multicast - program RMAC address filtering for the current
 * dev->flags: ALL_MULTI on/off, promiscuous on/off, and the individual
 * multicast CAM entries, via the rmac_addr_cmd/data mem registers.
 * NOTE(review): this listing carries stray leading line numbers and has
 * gaps (declarations of 'add', 'i', 'j', 'prev_cnt' and several closing
 * braces are missing from the dump); code left byte-identical.
 */
4734 static void s2io_set_multicast(struct net_device *dev)
4737 struct dev_mc_list *mclist;
4738 struct s2io_nic *sp = dev->priv;
4739 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4740 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4742 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
/* Phase 1: enable/disable the ALL_MULTI CAM slot, tracked in m_cast_flg. */
4745 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4746 /* Enable all Multicast addresses */
4747 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4748 &bar0->rmac_addr_data0_mem);
4749 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4750 &bar0->rmac_addr_data1_mem);
4751 val64 = RMAC_ADDR_CMD_MEM_WE |
4752 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4753 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4754 writeq(val64, &bar0->rmac_addr_cmd_mem);
4755 /* Wait till command completes */
4756 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4757 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4761 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4762 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4763 /* Disable all Multicast addresses */
4764 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4765 &bar0->rmac_addr_data0_mem);
4766 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4767 &bar0->rmac_addr_data1_mem);
4768 val64 = RMAC_ADDR_CMD_MEM_WE |
4769 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4770 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4771 writeq(val64, &bar0->rmac_addr_cmd_mem);
4772 /* Wait till command completes */
4773 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4774 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4778 sp->all_multi_pos = 0;
/*
 * Phase 2: promiscuous mode. The 64-bit mac_cfg register is updated as
 * two 32-bit writes, each preceded by the RMAC_CFG_KEY unlock write.
 */
4781 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4782 /* Put the NIC into promiscuous mode */
4783 add = &bar0->mac_cfg;
4784 val64 = readq(&bar0->mac_cfg);
4785 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4787 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4788 writel((u32) val64, add);
4789 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4790 writel((u32) (val64 >> 32), (add + 4));
/* vlan_tag_strip / vlan_strip_flag are driver globals — presumably
 * serialized by rtnl via this ndo; TODO confirm locking. */
4792 if (vlan_tag_strip != 1) {
4793 val64 = readq(&bar0->rx_pa_cfg);
4794 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4795 writeq(val64, &bar0->rx_pa_cfg);
4796 vlan_strip_flag = 0;
4799 val64 = readq(&bar0->mac_cfg);
4800 sp->promisc_flg = 1;
4801 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4803 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4804 /* Remove the NIC from promiscuous mode */
4805 add = &bar0->mac_cfg;
4806 val64 = readq(&bar0->mac_cfg);
4807 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4809 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4810 writel((u32) val64, add);
4811 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4812 writel((u32) (val64 >> 32), (add + 4));
4814 if (vlan_tag_strip != 0) {
4815 val64 = readq(&bar0->rx_pa_cfg);
4816 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4817 writeq(val64, &bar0->rx_pa_cfg);
4818 vlan_strip_flag = 1;
/* Phase 3: rewrite the per-address multicast CAM list: first clear the
 * previously programmed entries, then program dev->mc_list afresh. */
4821 val64 = readq(&bar0->mac_cfg);
4822 sp->promisc_flg = 0;
4823 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4827 /* Update individual M_CAST address list */
4828 if ((!sp->m_cast_flg) && dev->mc_count) {
4830 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4831 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4833 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4834 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4838 prev_cnt = sp->mc_addr_count;
4839 sp->mc_addr_count = dev->mc_count;
4841 /* Clear out the previous list of Mc in the H/W. */
4842 for (i = 0; i < prev_cnt; i++) {
4843 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4844 &bar0->rmac_addr_data0_mem);
4845 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4846 &bar0->rmac_addr_data1_mem);
4847 val64 = RMAC_ADDR_CMD_MEM_WE |
4848 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4849 RMAC_ADDR_CMD_MEM_OFFSET
4850 (MAC_MC_ADDR_START_OFFSET + i);
4851 writeq(val64, &bar0->rmac_addr_cmd_mem);
4853 /* Wait for command completes */
4854 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4855 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4857 DBG_PRINT(ERR_DBG, "%s: Adding ",
4859 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4864 /* Create the new Rx filter list and update the same in H/W. */
4865 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4866 i++, mclist = mclist->next) {
4867 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6 address bytes into a u64 for the DATA0 register; the
 * shift that accompanies the |= falls in a gap of this dump. */
4870 for (j = 0; j < ETH_ALEN; j++) {
4871 mac_addr |= mclist->dmi_addr[j];
4875 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4876 &bar0->rmac_addr_data0_mem);
4877 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4878 &bar0->rmac_addr_data1_mem);
4879 val64 = RMAC_ADDR_CMD_MEM_WE |
4880 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4881 RMAC_ADDR_CMD_MEM_OFFSET
4882 (i + MAC_MC_ADDR_START_OFFSET);
4883 writeq(val64, &bar0->rmac_addr_cmd_mem);
4885 /* Wait for command completes */
4886 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4887 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4889 DBG_PRINT(ERR_DBG, "%s: Adding ",
4891 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4899 * s2io_set_mac_addr - Programs the Xframe mac address
4900 * @dev : pointer to the device structure.
4901 * @addr: a uchar pointer to the new mac address which is to be set.
4902 * Description : This procedure will program the Xframe to receive
4903 * frames with new Mac Address
4904 * Return value: SUCCESS on success and an appropriate (-)ve integer
4905 * as defined in errno.h file on failure.
/*
 * s2io_set_mac_addr - program a new unicast MAC address into CAM slot 0
 * and mirror it into sp->def_mac_addr[0] (only when it actually changed).
 * NOTE(review): numbered dump with gaps — the per-byte shifts of the
 * pack loop, the DATA1 write and the return paths are missing here.
 */
4908 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4910 struct s2io_nic *sp = dev->priv;
4911 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4912 register u64 val64, mac_addr = 0;
4914 u64 old_mac_addr = 0;
4917 * Set the new MAC address as the new unicast filter and reflect this
4918 * change on the device address registered with the OS. It will be
/* Pack new and current addresses into u64s so they can be compared. */
4921 for (i = 0; i < ETH_ALEN; i++) {
4923 mac_addr |= addr[i];
4925 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4931 /* Update the internal structure with this new mac address */
4932 if(mac_addr != old_mac_addr) {
/* NOTE(review): sizeof(ETH_ALEN) is sizeof(int) (4), not 6 bytes —
 * looks like a bug (should be ETH_ALEN); the explicit byte stores
 * below mask it. Verify before changing. */
4933 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
4934 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4935 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4936 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4937 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4938 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4939 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
/* Write the packed address to unicast CAM offset 0 and strobe the cmd. */
4942 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4943 &bar0->rmac_addr_data0_mem);
4946 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4947 RMAC_ADDR_CMD_MEM_OFFSET(0);
4948 writeq(val64, &bar0->rmac_addr_cmd_mem);
4949 /* Wait till command completes */
4950 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4951 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4952 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4960 * s2io_ethtool_sset - Sets different link parameters.
4961 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
4962 * @info: pointer to the structure with parameters given by ethtool to set
4965 * The function sets different link parameters provided by the user onto
/*
 * s2io_ethtool_sset - ethtool set_settings handler. The hardware is
 * fixed at 10 Gb/s full duplex with no autoneg, so anything else is
 * rejected; otherwise the interface is bounced (close, and — in a gap
 * of this dump — presumably reopened; TODO confirm).
 */
4971 static int s2io_ethtool_sset(struct net_device *dev,
4972 struct ethtool_cmd *info)
4974 struct s2io_nic *sp = dev->priv;
4975 if ((info->autoneg == AUTONEG_ENABLE) ||
4976 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4979 s2io_close(sp->dev);
4987 * s2io_ethtool_gset - Return link specific information.
4988 * @sp : private member of the device structure, pointer to the
4989 * s2io_nic structure.
4990 * @info : pointer to the structure with parameters given by ethtool
4991 * to return link information.
4993 * Returns link specific information like speed, duplex etc.. to ethtool.
4995 * return 0 on success.
/*
 * s2io_ethtool_gset - ethtool get_settings: always reports 10G fibre,
 * autoneg off; speed/duplex are filled in only while the carrier is up.
 */
4998 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5000 struct s2io_nic *sp = dev->priv;
5001 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
/* NOTE(review): advertising uses SUPPORTED_* constants; they happen to
 * share bit positions with ADVERTISED_*, but the ADVERTISED_* family is
 * the correct one here — TODO confirm and fix. */
5002 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5003 info->port = PORT_FIBRE;
5004 /* info->transceiver?? TODO */
5006 if (netif_carrier_ok(sp->dev)) {
5007 info->speed = 10000;
5008 info->duplex = DUPLEX_FULL;
5014 info->autoneg = AUTONEG_DISABLE;
5019 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5020 * @sp : private member of the device structure, which is a pointer to the
5021 * s2io_nic structure.
5022 * @info : pointer to the structure with parameters given by ethtool to
5023 * return driver information.
5025 * Returns driver specific information like name, version etc. to ethtool.
/*
 * s2io_ethtool_gdrvinfo - fill struct ethtool_drvinfo: driver name,
 * version, bus id and the lengths ethtool uses to size reg/eeprom/test
 * buffers; stats count depends on Xframe I vs II.
 */
5030 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5031 struct ethtool_drvinfo *info)
5033 struct s2io_nic *sp = dev->priv;
/* NOTE(review): strncpy does not guarantee NUL termination when the
 * source fills the buffer; harmless only if the names are shorter than
 * the ethtool fields — verify. */
5035 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5036 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5037 strncpy(info->fw_version, "", sizeof(info->fw_version));
5038 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5039 info->regdump_len = XENA_REG_SPACE;
5040 info->eedump_len = XENA_EEPROM_SPACE;
5041 info->testinfo_len = S2IO_TEST_LEN;
5043 if (sp->device_type == XFRAME_I_DEVICE)
5044 info->n_stats = XFRAME_I_STAT_LEN;
5046 info->n_stats = XFRAME_II_STAT_LEN;
5050 * s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
5051 * @sp: private member of the device structure, which is a pointer to the
5052 * s2io_nic structure.
5053 * @regs : pointer to the structure with parameters given by ethtool for
5054 * dumping the registers.
5055 * @reg_space: The input argument into which all the registers are dumped.
5057 * Dumps the entire register space of xFrame NIC into the user given
/*
 * s2io_ethtool_gregs - copy the adapter register space (XENA_REG_SPACE
 * bytes) into the ethtool-supplied buffer, 8 bytes per readq.
 * NOTE(review): declarations of 'i' and 'reg' fall in a gap of this dump.
 */
5063 static void s2io_ethtool_gregs(struct net_device *dev,
5064 struct ethtool_regs *regs, void *space)
5068 u8 *reg_space = (u8 *) space;
5069 struct s2io_nic *sp = dev->priv;
5071 regs->len = XENA_REG_SPACE;
5072 regs->version = sp->pdev->subsystem_device;
5074 for (i = 0; i < regs->len; i += 8) {
5075 reg = readq(sp->bar0 + i);
/* Fix: the source had the mojibake '®' here — an HTML-entity-mangled
 * "&reg" — restored to take the address of the local 'reg'. */
5076 memcpy((reg_space + i), &reg, 8);
5081 * s2io_phy_id - timer function that alternates adapter LED.
5082 * @data : address of the private member of the device structure, which
5083 * is a pointer to the s2io_nic structure, provided as an u32.
5084 * Description: This is actually the timer function that alternates the
5085 * adapter LED bit of the adapter control bit to set/reset every time on
5086 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
5087 * once every second.
/*
 * s2io_phy_id - timer callback that toggles the identification LED:
 * GPIO_0 on Xframe II / newer subsystem ids, the adapter-control LED
 * bit otherwise; then re-arms itself every HZ/2 so the LED blinks.
 */
5089 static void s2io_phy_id(unsigned long data)
5091 struct s2io_nic *sp = (struct s2io_nic *) data;
5092 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5096 subid = sp->pdev->subsystem_device;
5097 if ((sp->device_type == XFRAME_II_DEVICE) ||
5098 ((subid & 0xFF) >= 0x07)) {
5099 val64 = readq(&bar0->gpio_control);
5100 val64 ^= GPIO_CTRL_GPIO_0;
5101 writeq(val64, &bar0->gpio_control);
5103 val64 = readq(&bar0->adapter_control);
5104 val64 ^= ADAPTER_LED_ON;
5105 writeq(val64, &bar0->adapter_control);
/* Re-arm for the next half-second toggle. */
5108 mod_timer(&sp->id_timer, jiffies + HZ / 2);
5112 * s2io_ethtool_idnic - To physically identify the nic on the system.
5113 * @sp : private member of the device structure, which is a pointer to the
5114 * s2io_nic structure.
5115 * @id : pointer to the structure with identification parameters given by
5117 * Description: Used to physically identify the NIC on the system.
5118 * The Link LED will blink for a time specified by the user for
5120 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5121 * identification is possible only if it's link is up.
5123 * int , returns 0 on success
/*
 * s2io_ethtool_idnic - ethtool phys_id: blink the adapter LED for
 * 'data' seconds (or MAX_FLICKER_TIME when data is 0 — the else branch
 * is in a gap of this dump) by (re)arming the s2io_phy_id timer, then
 * restore the saved gpio_control on cards with faulty link indicators.
 */
5126 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5128 u64 val64 = 0, last_gpio_ctrl_val;
5129 struct s2io_nic *sp = dev->priv;
5130 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5133 subid = sp->pdev->subsystem_device;
5134 last_gpio_ctrl_val = readq(&bar0->gpio_control);
/* Old Xframe I boards can only blink when the adapter is enabled. */
5135 if ((sp->device_type == XFRAME_I_DEVICE) &&
5136 ((subid & 0xFF) < 0x07)) {
5137 val64 = readq(&bar0->adapter_control);
5138 if (!(val64 & ADAPTER_CNTL_EN)) {
5140 "Adapter Link down, cannot blink LED\n");
/* Lazily initialize the blink timer on first use. */
5144 if (sp->id_timer.function == NULL) {
5145 init_timer(&sp->id_timer);
5146 sp->id_timer.function = s2io_phy_id;
5147 sp->id_timer.data = (unsigned long) sp;
5149 mod_timer(&sp->id_timer, jiffies);
/* Sleep for the requested blink duration, then stop the timer. */
5151 msleep_interruptible(data * HZ);
5153 msleep_interruptible(MAX_FLICKER_TIME);
5154 del_timer_sync(&sp->id_timer);
5156 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5157 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5158 last_gpio_ctrl_val = readq(&bar0->gpio_control);
/*
 * s2io_ethtool_gringparam - report ring sizes to ethtool: max pending
 * Rx descriptors depend on the Rx buffer mode (1-buffer vs 3B), and the
 * current pending counts are the sums over all Tx FIFOs / Rx rings.
 */
5164 static void s2io_ethtool_gringparam(struct net_device *dev,
5165 struct ethtool_ringparam *ering)
5167 struct s2io_nic *sp = dev->priv;
5168 int i,tx_desc_count=0,rx_desc_count=0;
5170 if (sp->rxd_mode == RXD_MODE_1)
5171 ering->rx_max_pending = MAX_RX_DESC_1;
5172 else if (sp->rxd_mode == RXD_MODE_3B)
5173 ering->rx_max_pending = MAX_RX_DESC_2;
5175 ering->tx_max_pending = MAX_TX_DESC;
/* Total Tx descriptors across all configured FIFOs. */
5176 for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5177 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5179 DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5180 ering->tx_pending = tx_desc_count;
/* Total Rx descriptors across all configured rings. */
5182 for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5183 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5185 ering->rx_pending = rx_desc_count;
5187 ering->rx_mini_max_pending = 0;
5188 ering->rx_mini_pending = 0;
/* Jumbo figures mirror the regular Rx figures (no separate jumbo ring). */
5189 if(sp->rxd_mode == RXD_MODE_1)
5190 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5191 else if (sp->rxd_mode == RXD_MODE_3B)
5192 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5193 ering->rx_jumbo_pending = rx_desc_count;
5197 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5198 * @sp : private member of the device structure, which is a pointer to the
5199 * s2io_nic structure.
5200 * @ep : pointer to the structure with pause parameters given by ethtool.
5202 * Returns the Pause frame generation and reception capability of the NIC.
/*
 * s2io_ethtool_getpause_data - read rmac_pause_cfg and report whether
 * pause-frame generation (tx_pause) and reception (rx_pause) are
 * enabled; pause autonegotiation is never used by this hardware.
 */
5206 static void s2io_ethtool_getpause_data(struct net_device *dev,
5207 struct ethtool_pauseparam *ep)
5210 struct s2io_nic *sp = dev->priv;
5211 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5213 val64 = readq(&bar0->rmac_pause_cfg);
5214 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5215 ep->tx_pause = TRUE;
5216 if (val64 & RMAC_PAUSE_RX_ENABLE)
5217 ep->rx_pause = TRUE;
5218 ep->autoneg = FALSE;
5222 * s2io_ethtool_setpause_data - set/reset pause frame generation.
5223 * @sp : private member of the device structure, which is a pointer to the
5224 * s2io_nic structure.
5225 * @ep : pointer to the structure with pause parameters given by ethtool.
5227 * It can be used to set or reset Pause frame generation or reception
5228 * support of the NIC.
5230 * int, returns 0 on Success
/*
 * s2io_ethtool_setpause_data - set/clear the pause generation and
 * reception enable bits in rmac_pause_cfg per the ethtool request.
 * NOTE(review): the if(ep->tx_pause)/if(ep->rx_pause) condition lines
 * fall in gaps of this dump; only the set/clear bodies are visible.
 */
5233 static int s2io_ethtool_setpause_data(struct net_device *dev,
5234 struct ethtool_pauseparam *ep)
5237 struct s2io_nic *sp = dev->priv;
5238 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5240 val64 = readq(&bar0->rmac_pause_cfg);
5242 val64 |= RMAC_PAUSE_GEN_ENABLE;
5244 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5246 val64 |= RMAC_PAUSE_RX_ENABLE;
5248 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5249 writeq(val64, &bar0->rmac_pause_cfg);
5254 * read_eeprom - reads 4 bytes of data from user given offset.
5255 * @sp : private member of the device structure, which is a pointer to the
5256 * s2io_nic structure.
5257 * @off : offset from which the data must be read
5258 * @data : Its an output parameter where the data read at the given
5261 * Will read 4 bytes of data from the user given offset and return the
5263 * NOTE: Will allow to read only part of the EEPROM visible through the
5266 * -1 on failure and 0 on success.
/* EEPROM device id used in the I2C control word (Xframe I path). */
5269 #define S2IO_DEV_ID 5
/*
 * read_eeprom - read 4 bytes at offset 'off' into *data. Xframe I uses
 * the I2C controller, Xframe II the SPI controller; both poll up to 5
 * times for completion (the sleep/retry lines fall in dump gaps).
 */
5270 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
5275 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5277 if (sp->device_type == XFRAME_I_DEVICE) {
5278 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5279 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5280 I2C_CONTROL_CNTL_START;
5281 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5283 while (exit_cnt < 5) {
5284 val64 = readq(&bar0->i2c_control);
5285 if (I2C_CONTROL_CNTL_END(val64)) {
5286 *data = I2C_CONTROL_GET_DATA(val64);
/* Xframe II: SPI read command (cmd 0x3), data lands in spi_data. */
5295 if (sp->device_type == XFRAME_II_DEVICE) {
5296 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5297 SPI_CONTROL_BYTECNT(0x3) |
5298 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5299 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5300 val64 |= SPI_CONTROL_REQ;
5301 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5302 while (exit_cnt < 5) {
5303 val64 = readq(&bar0->spi_control);
5304 if (val64 & SPI_CONTROL_NACK) {
5307 } else if (val64 & SPI_CONTROL_DONE) {
5308 *data = readq(&bar0->spi_data);
5321 * write_eeprom - actually writes the relevant part of the data value.
5322 * @sp : private member of the device structure, which is a pointer to the
5323 * s2io_nic structure.
5324 * @off : offset at which the data must be written
5325 * @data : The data that is to be written
5326 * @cnt : Number of bytes of the data that are actually to be written into
5327 * the Eeprom. (max of 3)
5329 * Actually writes the relevant part of the data value into the Eeprom
5330 * through the I2C bus.
5332 * 0 on success, -1 on failure.
/*
 * write_eeprom - write the low 'cnt' bytes of 'data' at offset 'off'.
 * Xframe I goes through the I2C controller, Xframe II through the SPI
 * controller (write command 0x2); both poll up to 5 times. Returns 0
 * on success, -1 on NACK/timeout (return paths fall in dump gaps).
 */
5335 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
5337 int exit_cnt = 0, ret = -1;
5339 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5341 if (sp->device_type == XFRAME_I_DEVICE) {
5342 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5343 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5344 I2C_CONTROL_CNTL_START;
5345 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5347 while (exit_cnt < 5) {
5348 val64 = readq(&bar0->i2c_control);
5349 if (I2C_CONTROL_CNTL_END(val64)) {
5350 if (!(val64 & I2C_CONTROL_NACK))
5359 if (sp->device_type == XFRAME_II_DEVICE) {
/* SPI byte-count field encodes 8 bytes as 0. */
5360 int write_cnt = (cnt == 8) ? 0 : cnt;
5361 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
5363 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5364 SPI_CONTROL_BYTECNT(write_cnt) |
5365 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5366 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5367 val64 |= SPI_CONTROL_REQ;
5368 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5369 while (exit_cnt < 5) {
5370 val64 = readq(&bar0->spi_control);
5371 if (val64 & SPI_CONTROL_NACK) {
5373 } else if (val64 & SPI_CONTROL_DONE) {
/*
 * s2io_vpd_read - read the 256-byte PCI VPD area through the VPD
 * capability registers (vpd_addr presumed to be the capability base —
 * TODO confirm it is found via pci_find_capability in a gap of this
 * dump), then extract serial number ("SN" tag) and product name.
 */
5384 static void s2io_vpd_read(struct s2io_nic *nic)
5388 int i=0, cnt, fail = 0;
5389 int vpd_addr = 0x80;
5391 if (nic->device_type == XFRAME_II_DEVICE) {
5392 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5396 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5399 strcpy(nic->serial_num, "NOT AVAILABLE");
5401 vpd_data = kmalloc(256, GFP_KERNEL);
5403 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5406 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
/* Read VPD a dword at a time: write the address, poll the flag byte. */
5408 for (i = 0; i < 256; i +=4 ) {
5409 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5410 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5411 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5412 for (cnt = 0; cnt <5; cnt++) {
5414 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5419 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5423 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5424 (u32 *)&vpd_data[i]);
5428 /* read serial number of adapter */
5429 for (cnt = 0; cnt < 256; cnt++) {
/* NOTE(review): cnt+2/cnt+3 can index past the 256-byte buffer near
 * the end of the scan — bounds look unchecked; verify. */
5430 if ((vpd_data[cnt] == 'S') &&
5431 (vpd_data[cnt+1] == 'N') &&
5432 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5433 memset(nic->serial_num, 0, VPD_STRING_LEN);
5434 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5441 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5442 memset(nic->product_name, 0, vpd_data[1]);
5443 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5446 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5450 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5451 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
5452 * @eeprom : pointer to the user level structure provided by ethtool,
5453 * containing all relevant information.
5454 * @data_buf : user defined value to be written into Eeprom.
5455 * Description: Reads the values stored in the Eeprom at given offset
5456 * for a given length. Stores these values into the input argument data
5457 * buffer 'data_buf' and returns these to the caller (ethtool.)
/*
 * s2io_ethtool_geeprom - ethtool get_eeprom: clamp the request to
 * XENA_EEPROM_SPACE and copy it out 4 bytes at a time via read_eeprom.
 * ('valid' is derived from 'data' in a gap of this dump.)
 */
5462 static int s2io_ethtool_geeprom(struct net_device *dev,
5463 struct ethtool_eeprom *eeprom, u8 * data_buf)
5467 struct s2io_nic *sp = dev->priv;
/* Magic lets userspace verify it is talking to the expected device. */
5469 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5471 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5472 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5474 for (i = 0; i < eeprom->len; i += 4) {
5475 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5476 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5480 memcpy((data_buf + i), &valid, 4);
5486 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5487 * @sp : private member of the device structure, which is a pointer to the
5488 * s2io_nic structure.
5489 * @eeprom : pointer to the user level structure provided by ethtool,
5490 * containing all relevant information.
5491 * @data_buf ; user defined value to be written into Eeprom.
5493 * Tries to write the user provided value in the Eeprom, at the offset
5494 * given by the user.
5496 * 0 on success, -EFAULT on failure.
/*
 * s2io_ethtool_seeprom - ethtool set_eeprom: verify the magic matches
 * this device, then write the user buffer byte-by-byte through
 * write_eeprom (the loop framing falls in gaps of this dump).
 */
5499 static int s2io_ethtool_seeprom(struct net_device *dev,
5500 struct ethtool_eeprom *eeprom,
5503 int len = eeprom->len, cnt = 0;
5504 u64 valid = 0, data;
5505 struct s2io_nic *sp = dev->priv;
5507 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5509 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5510 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
/* Each byte is positioned in the top byte of the 32-bit EEPROM word. */
5516 data = (u32) data_buf[cnt] & 0x000000FF;
5518 valid = (u32) (data << 24);
5522 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5524 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5526 "write into the specified offset\n");
5537 * s2io_register_test - reads and writes into all clock domains.
5538 * @sp : private member of the device structure, which is a pointer to the
5539 * s2io_nic structure.
5540 * @data : variable that returns the result of each of the test conducted b
5543 * Read and write into all clock domains. The NIC has 3 clock domains,
5544 * see that registers in all the three regions are accessible.
/*
 * s2io_register_test - self-test: compare four read-only registers in
 * different clock domains against their known reset values, then do two
 * write/read-back patterns on xmsi_data. Failure paths (setting *data /
 * fail flag) fall in gaps of this dump.
 */
5549 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5551 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5552 u64 val64 = 0, exp_val;
5555 val64 = readq(&bar0->pif_rd_swapper_fb);
5556 if (val64 != 0x123456789abcdefULL) {
5558 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5561 val64 = readq(&bar0->rmac_pause_cfg);
5562 if (val64 != 0xc000ffff00000000ULL) {
5564 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5567 val64 = readq(&bar0->rx_queue_cfg);
/* Expected rx_queue_cfg reset value differs between Xframe I and II. */
5568 if (sp->device_type == XFRAME_II_DEVICE)
5569 exp_val = 0x0404040404040404ULL;
5571 exp_val = 0x0808080808080808ULL;
5572 if (val64 != exp_val) {
5574 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5577 val64 = readq(&bar0->xgxs_efifo_cfg);
5578 if (val64 != 0x000000001923141EULL) {
5580 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
/* Write tests: alternate bit patterns through xmsi_data and read back. */
5583 val64 = 0x5A5A5A5A5A5A5A5AULL;
5584 writeq(val64, &bar0->xmsi_data);
5585 val64 = readq(&bar0->xmsi_data);
5586 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5588 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5591 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5592 writeq(val64, &bar0->xmsi_data);
5593 val64 = readq(&bar0->xmsi_data);
5594 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5596 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5604 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5605 * @sp : private member of the device structure, which is a pointer to the
5606 * s2io_nic structure.
5607 * @data:variable that returns the result of each of the test conducted by
5610 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
/*
 * s2io_eeprom_test - self-test for EEPROM programmability: negative
 * tests on write-protected offsets (Xframe I only, since SPI on
 * Xframe II can write anywhere), positive write/read-back tests at
 * 0x4F0 and 0x7F0, and restoration of the saved contents afterwards.
 */
5616 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5619 u64 ret_data, org_4F0, org_7F0;
5620 u8 saved_4F0 = 0, saved_7F0 = 0;
5621 struct net_device *dev = sp->dev;
5623 /* Test Write Error at offset 0 */
5624 /* Note that SPI interface allows write access to all areas
5625 * of EEPROM. Hence doing all negative testing only for Xframe I.
5627 if (sp->device_type == XFRAME_I_DEVICE)
5628 if (!write_eeprom(sp, 0, 0, 3))
5631 /* Save current values at offsets 0x4F0 and 0x7F0 */
5632 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5634 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5637 /* Test Write at offset 4f0 */
5638 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5640 if (read_eeprom(sp, 0x4F0, &ret_data))
5643 if (ret_data != 0x012345) {
5644 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5645 "Data written %llx Data read %llx\n",
5646 dev->name, (unsigned long long)0x12345,
5647 (unsigned long long)ret_data);
5651 /* Reset the EEPROM data go FFFF */
5652 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5654 /* Test Write Request Error at offset 0x7c */
5655 if (sp->device_type == XFRAME_I_DEVICE)
5656 if (!write_eeprom(sp, 0x07C, 0, 3))
5659 /* Test Write Request at offset 0x7f0 */
5660 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5662 if (read_eeprom(sp, 0x7F0, &ret_data))
5665 if (ret_data != 0x012345) {
5666 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5667 "Data written %llx Data read %llx\n",
5668 dev->name, (unsigned long long)0x12345,
5669 (unsigned long long)ret_data);
5673 /* Reset the EEPROM data go FFFF */
5674 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
/* Remaining negative tests: offsets that must reject writes on Xframe I. */
5676 if (sp->device_type == XFRAME_I_DEVICE) {
5677 /* Test Write Error at offset 0x80 */
5678 if (!write_eeprom(sp, 0x080, 0, 3))
5681 /* Test Write Error at offset 0xfc */
5682 if (!write_eeprom(sp, 0x0FC, 0, 3))
5685 /* Test Write Error at offset 0x100 */
5686 if (!write_eeprom(sp, 0x100, 0, 3))
5689 /* Test Write Error at offset 4ec */
5690 if (!write_eeprom(sp, 0x4EC, 0, 3))
5694 /* Restore values at offsets 0x4F0 and 0x7F0 */
5696 write_eeprom(sp, 0x4F0, org_4F0, 3);
5698 write_eeprom(sp, 0x7F0, org_7F0, 3);
5705 * s2io_bist_test - invokes the MemBist test of the card .
5706 * @sp : private member of the device structure, which is a pointer to the
5707 * s2io_nic structure.
5708 * @data:variable that returns the result of each of the test conducted by
5711 * This invokes the MemBist test of the card. We give around
5712 * 2 secs time for the Test to complete. If it's still not complete
5713 * within this period, we consider that the test failed.
5715 * 0 on success and -1 on failure.
/*
 * s2io_bist_test - start the PCI built-in self test by setting
 * PCI_BIST_START, then poll until the bit clears and report the BIST
 * completion code in *data (poll loop framing falls in dump gaps).
 */
5718 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5721 int cnt = 0, ret = -1;
5723 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5724 bist |= PCI_BIST_START;
/* NOTE(review): PCI_BIST is a one-byte register but this issues a word
 * write, clobbering the adjacent config byte — looks wrong; verify
 * against pci_write_config_byte. */
5725 pci_write_config_word(sp->pdev, PCI_BIST, bist);
5728 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5729 if (!(bist & PCI_BIST_START)) {
5730 *data = (bist & PCI_BIST_CODE_MASK);
5742 * s2io_link_test - verifies the link state of the nic
5743 * @sp : private member of the device structure, which is a pointer to the
5744 * s2io_nic structure.
5745 * @data: variable that returns the result of each of the test conducted by
5748 * The function verifies the link state of the NIC and updates the input
5749 * argument 'data' appropriately.
/*
 * s2io_link_test - online self-test: read adapter_status and record in
 * *data whether the link is up (result assignment falls in a dump gap).
 */
5754 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5756 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5759 val64 = readq(&bar0->adapter_status);
5760 if(!(LINK_IS_UP(val64)))
5769 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5770 * @sp - private member of the device structure, which is a pointer to the
5771 * s2io_nic structure.
5772 * @data - variable that returns the result of each of the test
5773 * conducted by the driver.
5775 * This is one of the offline test that tests the read and write
5776 * access to the RldRam chip on the NIC.
/*
 * s2io_rldram_test - offline RLDRAM self-test: disable ECC, enter
 * RLDRAM test mode, then for two iterations write three 64-bit
 * patterns (bit-inverted on the second pass), trigger a test write and
 * a test read, and check MC_RLDRAM_TEST_PASS. Finally leave test mode.
 */
5781 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5783 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5785 int cnt, iteration = 0, test_fail = 0;
/* ECC would interfere with raw pattern verification — switch it off. */
5787 val64 = readq(&bar0->adapter_control);
5788 val64 &= ~ADAPTER_ECC_EN;
5789 writeq(val64, &bar0->adapter_control);
5791 val64 = readq(&bar0->mc_rldram_test_ctrl);
5792 val64 |= MC_RLDRAM_TEST_MODE;
5793 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5795 val64 = readq(&bar0->mc_rldram_mrs);
5796 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5797 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5799 val64 |= MC_RLDRAM_MRS_ENABLE;
5800 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5802 while (iteration < 2) {
/* Load the three test-data registers; invert the patterns on pass 2. */
5803 val64 = 0x55555555aaaa0000ULL;
5804 if (iteration == 1) {
5805 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5807 writeq(val64, &bar0->mc_rldram_test_d0);
5809 val64 = 0xaaaa5a5555550000ULL;
5810 if (iteration == 1) {
5811 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5813 writeq(val64, &bar0->mc_rldram_test_d1);
5815 val64 = 0x55aaaaaaaa5a0000ULL;
5816 if (iteration == 1) {
5817 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5819 writeq(val64, &bar0->mc_rldram_test_d2);
5821 val64 = (u64) (0x0000003ffffe0100ULL);
5822 writeq(val64, &bar0->mc_rldram_test_add);
/* Kick off the test write, then poll DONE (sleeps are in dump gaps). */
5824 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5826 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5828 for (cnt = 0; cnt < 5; cnt++) {
5829 val64 = readq(&bar0->mc_rldram_test_ctrl);
5830 if (val64 & MC_RLDRAM_TEST_DONE)
/* Kick off the test read-back and poll DONE again. */
5838 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5839 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5841 for (cnt = 0; cnt < 5; cnt++) {
5842 val64 = readq(&bar0->mc_rldram_test_ctrl);
5843 if (val64 & MC_RLDRAM_TEST_DONE)
5851 val64 = readq(&bar0->mc_rldram_test_ctrl);
5852 if (!(val64 & MC_RLDRAM_TEST_PASS))
5860 /* Bring the adapter out of test mode */
5861 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5867 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
5868 * @sp : private member of the device structure, which is a pointer to the
5869 * s2io_nic structure.
5870 * @ethtest : pointer to a ethtool command specific structure that will be
5871 * returned to the user.
5872 * @data : variable that returns the result of each of the test
5873 * conducted by the driver.
5875 * This function conducts 6 tests ( 4 offline and 2 online) to determine
5876 * the health of the card.
/*
 * s2io_ethtool_test - ethtool self-test entry point. Offline: close
 * the interface if it was running, then run register (data[0]),
 * rldram (data[3]), eeprom (data[1]) and bist (data[4]) tests. Online:
 * run the link test (data[2]). Re-open/error handling for the
 * interface falls in gaps of this dump.
 */
5881 static void s2io_ethtool_test(struct net_device *dev,
5882 struct ethtool_test *ethtest,
5885 struct s2io_nic *sp = dev->priv;
5886 int orig_state = netif_running(sp->dev);
5888 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5889 /* Offline Tests. */
5891 s2io_close(sp->dev);
5893 if (s2io_register_test(sp, &data[0]))
5894 ethtest->flags |= ETH_TEST_FL_FAILED;
5898 if (s2io_rldram_test(sp, &data[3]))
5899 ethtest->flags |= ETH_TEST_FL_FAILED;
5903 if (s2io_eeprom_test(sp, &data[1]))
5904 ethtest->flags |= ETH_TEST_FL_FAILED;
5906 if (s2io_bist_test(sp, &data[4]))
5907 ethtest->flags |= ETH_TEST_FL_FAILED;
5917 "%s: is not up, cannot run test\n",
5926 if (s2io_link_test(sp, &data[2]))
5927 ethtest->flags |= ETH_TEST_FL_FAILED;
5936 static void s2io_get_ethtool_stats(struct net_device *dev,
5937 struct ethtool_stats *estats,
5941 struct s2io_nic *sp = dev->priv;
5942 struct stat_block *stat_info = sp->mac_control.stats_info;
5944 s2io_updt_stats(sp);
5946 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5947 le32_to_cpu(stat_info->tmac_frms);
5949 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5950 le32_to_cpu(stat_info->tmac_data_octets);
5951 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5953 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5954 le32_to_cpu(stat_info->tmac_mcst_frms);
5956 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5957 le32_to_cpu(stat_info->tmac_bcst_frms);
5958 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5960 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5961 le32_to_cpu(stat_info->tmac_ttl_octets);
5963 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5964 le32_to_cpu(stat_info->tmac_ucst_frms);
5966 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5967 le32_to_cpu(stat_info->tmac_nucst_frms);
5969 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5970 le32_to_cpu(stat_info->tmac_any_err_frms);
5971 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5972 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5974 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5975 le32_to_cpu(stat_info->tmac_vld_ip);
5977 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5978 le32_to_cpu(stat_info->tmac_drop_ip);
5980 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5981 le32_to_cpu(stat_info->tmac_icmp);
5983 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5984 le32_to_cpu(stat_info->tmac_rst_tcp);
5985 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5986 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5987 le32_to_cpu(stat_info->tmac_udp);
5989 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5990 le32_to_cpu(stat_info->rmac_vld_frms);
5992 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5993 le32_to_cpu(stat_info->rmac_data_octets);
5994 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5995 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5997 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5998 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
6000 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
6001 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
6002 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
6003 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
6004 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
6005 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
6006 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
6008 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
6009 le32_to_cpu(stat_info->rmac_ttl_octets);
6011 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
6012 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
6014 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
6015 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
6017 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
6018 le32_to_cpu(stat_info->rmac_discarded_frms);
6020 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
6021 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
6022 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
6023 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
6025 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
6026 le32_to_cpu(stat_info->rmac_usized_frms);
6028 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6029 le32_to_cpu(stat_info->rmac_osized_frms);
6031 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6032 le32_to_cpu(stat_info->rmac_frag_frms);
6034 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6035 le32_to_cpu(stat_info->rmac_jabber_frms);
6036 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6037 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6038 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6039 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6040 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6041 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6043 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
6044 le32_to_cpu(stat_info->rmac_ip);
6045 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6046 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
6048 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
6049 le32_to_cpu(stat_info->rmac_drop_ip);
6051 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
6052 le32_to_cpu(stat_info->rmac_icmp);
6053 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
6055 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
6056 le32_to_cpu(stat_info->rmac_udp);
6058 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6059 le32_to_cpu(stat_info->rmac_err_drp_udp);
6060 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
6061 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6062 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6063 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6064 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6065 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6066 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6067 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6068 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6069 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6070 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6071 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6072 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6073 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6074 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6075 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6076 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
6078 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6079 le32_to_cpu(stat_info->rmac_pause_cnt);
6080 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6081 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
6083 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6084 le32_to_cpu(stat_info->rmac_accepted_ip);
6085 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
6086 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6087 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6088 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6089 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6090 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6091 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6092 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6093 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6094 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6095 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6096 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6097 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6098 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6099 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6100 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6101 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6102 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6103 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
6105 /* Enhanced statistics exist only for Hercules */
6106 if(sp->device_type == XFRAME_II_DEVICE) {
6108 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6110 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6112 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6113 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6114 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6115 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6116 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6117 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6118 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6119 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6120 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6121 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6122 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6123 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6124 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6125 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
6129 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6130 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
6131 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6132 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6133 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6134 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
6135 for (k = 0; k < MAX_RX_RINGS; k++)
6136 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
6137 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6138 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6139 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6140 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6141 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6142 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6143 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6144 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6145 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6146 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6147 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6148 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
6149 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6150 tmp_stats[i++] = stat_info->sw_stat.sending_both;
6151 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6152 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
6153 if (stat_info->sw_stat.num_aggregations) {
6154 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6157 * Since 64-bit divide does not work on all platforms,
6158 * do repeated subtraction.
6160 while (tmp >= stat_info->sw_stat.num_aggregations) {
6161 tmp -= stat_info->sw_stat.num_aggregations;
6164 tmp_stats[i++] = count;
6168 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
6169 tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
6170 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
6171 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6172 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6173 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6174 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6175 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6176 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6178 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6179 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6180 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6181 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6182 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6184 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6185 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6186 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6187 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6188 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6189 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6190 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6191 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6192 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
6193 tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6194 tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6195 tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6196 tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6197 tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6198 tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6199 tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6200 tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6201 tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6202 tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6203 tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6204 tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6205 tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6206 tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6207 tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6208 tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6209 tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
6212 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6214 return (XENA_REG_SPACE);
6218 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6220 struct s2io_nic *sp = dev->priv;
6222 return (sp->rx_csum);
/*
 * s2io_ethtool_set_rx_csum - ethtool hook to set Rx checksum offload.
 * @dev: device pointer.
 * @data: non-zero to enable, zero to disable.
 *
 * NOTE(review): only the local setup is visible in this chunk; the body
 * that applies @data is elided here.
 */
static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
	struct s2io_nic *sp = dev->priv;
6237 static int s2io_get_eeprom_len(struct net_device *dev)
6239 return (XENA_EEPROM_SPACE);
6242 static int s2io_ethtool_self_test_count(struct net_device *dev)
6244 return (S2IO_TEST_LEN);
/*
 * s2io_ethtool_get_strings - copy the requested string table to @data.
 * @dev: device pointer.
 * @stringset: which table (self-test names or statistics keys).
 * @data: destination buffer sized by the matching *_count callback.
 *
 * For the stats set the layout is: Xena base keys, then (Xframe-II only)
 * the enhanced keys, then the driver software-stat keys.  This order must
 * match the value order in s2io_get_ethtool_stats().
 * NOTE(review): the switch case labels, break statements and some closing
 * braces are elided in this chunk of the file.
 */
static void s2io_ethtool_get_strings(struct net_device *dev,
	u32 stringset, u8 * data)
	struct s2io_nic *sp = dev->priv;
	switch (stringset) {
		/* self-test names */
		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
		/* statistics keys: Xena base set first */
		stat_size = sizeof(ethtool_xena_stats_keys);
		memcpy(data, &ethtool_xena_stats_keys,stat_size);
		if(sp->device_type == XFRAME_II_DEVICE) {
			/* Hercules (Xframe-II) adds the enhanced-stats keys */
			memcpy(data + stat_size,
				&ethtool_enhanced_stats_keys,
				sizeof(ethtool_enhanced_stats_keys));
			stat_size += sizeof(ethtool_enhanced_stats_keys);
		/* driver-maintained software stats always come last */
		memcpy(data + stat_size, &ethtool_driver_stats_keys,
			sizeof(ethtool_driver_stats_keys));
/*
 * s2io_ethtool_get_stats_count - number of ethtool statistics entries.
 * @dev: device pointer.
 *
 * Xframe-II exposes more counters (the enhanced set) than Xframe-I, so
 * the count depends on the device type.
 * NOTE(review): the local declaration of stat_count, the break statements
 * and the return are elided in this chunk.
 */
static int s2io_ethtool_get_stats_count(struct net_device *dev)
	struct s2io_nic *sp = dev->priv;
	switch(sp->device_type) {
	case XFRAME_I_DEVICE:
		stat_count = XFRAME_I_STAT_LEN;
	case XFRAME_II_DEVICE:
		stat_count = XFRAME_II_STAT_LEN;
/*
 * s2io_ethtool_op_set_tx_csum - toggle Tx IP checksum offload.
 * @dev: device pointer.
 * @data: non-zero to enable, zero to disable.
 *
 * Sets/clears NETIF_F_IP_CSUM in dev->features.
 * NOTE(review): the if/else keyed on @data and the return statement are
 * elided in this chunk.
 */
static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
		dev->features |= NETIF_F_IP_CSUM;
		dev->features &= ~NETIF_F_IP_CSUM;
6298 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6300 return (dev->features & NETIF_F_TSO) != 0;
/*
 * s2io_ethtool_op_set_tso - toggle TCP segmentation offload (v4 and v6).
 * @dev: device pointer.
 * @data: non-zero to enable, zero to disable.
 *
 * NOTE(review): the if/else keyed on @data and the return statement are
 * elided in this chunk.
 */
static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
/*
 * ethtool operations for this driver.  Generic ethtool_op_* helpers are
 * used where no hardware-specific handling is required; all other hooks
 * map to the s2io_ethtool_* implementations defined above.
 * NOTE(review): the closing brace of this initializer is elided in this
 * chunk.
 */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.get_ufo = ethtool_op_get_ufo,
	.set_ufo = ethtool_op_set_ufo,
	.self_test_count = s2io_ethtool_self_test_count,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_stats_count = s2io_ethtool_get_stats_count,
	.get_ethtool_stats = s2io_get_ethtool_stats
6344 * s2io_ioctl - Entry point for the Ioctl
6345 * @dev : Device pointer.
 * @ifr : An IOCTL-specific structure that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
6348 * @cmd : This is used to distinguish between the different commands that
6349 * can be passed to the IOCTL functions.
 * Currently no special functionality is supported in IOCTL, hence the
 * function always returns -EOPNOTSUPP.
/* Body elided in this chunk; see the kernel-doc comment above. */
static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6361 * s2io_change_mtu - entry point to change MTU size for the device.
6362 * @dev : device pointer.
6363 * @new_mtu : the new MTU size for the device.
6364 * Description: A driver entry point to change MTU size for the device.
6365 * Before changing the MTU the device must be stopped.
6367 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * s2io_change_mtu - change the device MTU.
 * @dev: device pointer.
 * @new_mtu: requested MTU; must lie in [MIN_MTU, S2IO_JUMBO_SIZE].
 *
 * When the interface is running, the card is cycled down/up so the new
 * size takes effect; when it is down, only the RMAC max-payload register
 * is reprogrammed.
 * NOTE(review): the range-check return, dev->mtu assignment, card-down
 * call and final return are elided in this chunk.
 */
static int s2io_change_mtu(struct net_device *dev, int new_mtu)
	struct s2io_nic *sp = dev->priv;
	if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
		DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
	if (netif_running(dev)) {
		netif_stop_queue(dev);
		if (s2io_card_up(sp)) {
			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else { /* Device is down */
		struct XENA_dev_config __iomem *bar0 = sp->bar0;
		u64 val64 = new_mtu;

		/* program the RMAC maximum payload length directly */
		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6402 * s2io_tasklet - Bottom half of the ISR.
6403 * @dev_adr : address of the device structure in dma_addr_t format.
6405 * This is the tasklet or the bottom half of the ISR. This is
6406 * an extension of the ISR which is scheduled by the scheduler to be run
6407 * when the load on the CPU is low. All low priority tasks of the ISR can
6408 * be pushed into the tasklet. For now the tasklet is used only to
6409 * replenish the Rx buffers in the Rx buffer descriptors.
/*
 * s2io_tasklet - bottom half of the ISR.
 * @dev_addr: net_device pointer cast to unsigned long.
 *
 * Replenishes the Rx buffers on every ring outside interrupt context.
 * TASKLET_IN_USE guards against re-entry; the flag is cleared through
 * sp->tasklet_status on exit.
 * NOTE(review): local declarations of i/ret and the break statements in
 * the error paths are elided in this chunk.
 */
static void s2io_tasklet(unsigned long dev_addr)
	struct net_device *dev = (struct net_device *) dev_addr;
	struct s2io_nic *sp = dev->priv;
	struct mac_info *mac_control;
	struct config_param *config;
	mac_control = &sp->mac_control;
	config = &sp->config;
	if (!TASKLET_IN_USE) {
		for (i = 0; i < config->rx_ring_num; i++) {
			ret = fill_rx_buffers(sp, i);
			if (ret == -ENOMEM) {
				DBG_PRINT(INFO_DBG, "%s: Out of ",
				DBG_PRINT(INFO_DBG, "memory in tasklet\n");
			} else if (ret == -EFILL) {
				"%s: Rx Ring %d is full\n",
	clear_bit(0, (&sp->tasklet_status));
 * s2io_set_link - Set the Link status
 * @data: long pointer to device private structure
6447 * Description: Sets the link status for the adapter
/*
 * Work-queue handler that brings the software link state in line with the
 * adapter's hardware link indication.  Uses bit 0 of nic->link_state as a
 * mutual-exclusion flag against the reset path.
 * NOTE(review): several locals (subid, val64), returns, delays and closing
 * braces are elided in this chunk.
 */
static void s2io_set_link(struct work_struct *work)
	struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	if (!netif_running(dev))
	if (test_and_set_bit(0, &(nic->link_state))) {
		/* The card is being reset, no point doing anything */
	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NIC's self-initiated
		 * cleanup to complete.
		 */
	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				/* first link-up: enable the adapter */
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					nic->device_type, subid)) {
					/* drive the link LED via GPIO 0 */
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					val64 = readq(&bar0->gpio_control);
				val64 |= ADAPTER_LED_ON;
				writeq(val64, &bar0->adapter_control);
				nic->device_enabled_once = TRUE;
				DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
				DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
				netif_stop_queue(dev);
		val64 = readq(&bar0->adapter_control);
		val64 |= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_UP);
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			val64 = readq(&bar0->gpio_control);
		/* link down: turn the LED off and report it */
		val64 = readq(&bar0->adapter_control);
		val64 = val64 &(~ADAPTER_LED_ON);
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_DOWN);
	clear_bit(0, &(nic->link_state));
/*
 * Re-arm one Rx descriptor during card-down replenish (rxd_owner_bit_reset).
 * If the descriptor still owns an skb (Host_Control != 0) the previously
 * mapped addresses cached in *temp0..*temp2 are reused; otherwise a fresh
 * skb is allocated and DMA-mapped, and the new mappings are cached for the
 * descriptors that follow.
 * NOTE(review): the 'struct buffAdd *ba' parameter line, the NULL-skb
 * if/else framing, returns and several closing braces (including the
 * memalloc_failed label line) are elided in this chunk.
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
	struct sk_buff **skb, u64 *temp0, u64 *temp1,
	u64 *temp2, int size)
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frames are not going to be processed,
			 * reuse the same mapped address for the RxD.
			 */
			rxdp1->Buffer0_ptr = *temp0;
			*skb = dev_alloc_skb(size);
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			/* store the mapped addr in a temp variable so
			 * it can be reused for the next rxd whose
			 * Host_Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, (*skb)->data,
					size - NET_IP_ALIGN,
					PCI_DMA_FROMDEVICE);
			if( (rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
				goto memalloc_failed;
			rxdp->Host_Control = (unsigned long) (*skb);
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
			*skb = dev_alloc_skb(size);
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			rxdp3->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer2_ptr == 0) ||
				(rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
				goto memalloc_failed;
			rxdp3->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
					PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
				/* unwind the Buffer2 mapping before bailing */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			rxdp->Host_Control = (unsigned long) (*skb);
			/* Buffer-1 will be a dummy buffer, not used */
			rxdp3->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
					PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer1_ptr == 0) ||
				(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
				/* unwind both earlier mappings */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer0_ptr,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
	/* common mapping-failure bookkeeping: count, free, bail */
	stats->pci_map_fail_cnt++;
	stats->mem_freed += (*skb)->truesize;
	dev_kfree_skb(*skb);
/*
 * Program the buffer-size fields of an Rx descriptor's Control_2 word
 * according to the ring mode: one buffer of (size - NET_IP_ALIGN) bytes
 * in 1-buffer mode, or BUF0_LEN / 1 (dummy) / mtu+4 in 3B mode.
 * NOTE(review): the trailing 'int size)' parameter line and closing
 * braces are elided in this chunk.
 */
static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
	struct net_device *dev = sp->dev;
	if (sp->rxd_mode == RXD_MODE_1) {
		rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
	} else if (sp->rxd_mode == RXD_MODE_3B) {
		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
		rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
/*
 * Hand every Rx descriptor back to the hardware with a valid buffer so
 * the ring does not "bump" while the card is going down.  For each rxd:
 * re-arm the buffer pointers (set_rxd_buffer_pointer), program the buffer
 * sizes (set_rxd_buffer_size), then set RXD_OWN_XENA.
 * NOTE(review): part of the set_rxd_buffer_pointer argument list, the
 * error return and several closing braces are elided in this chunk.
 */
static int rxd_owner_bit_reset(struct s2io_nic *sp)
	int i, j, k, blk_cnt = 0, size;
	struct mac_info * mac_control = &sp->mac_control;
	struct config_param *config = &sp->config;
	struct net_device *dev = sp->dev;
	struct RxD_t *rxdp = NULL;
	struct sk_buff *skb = NULL;
	struct buffAdd *ba = NULL;
	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
	/* Calculate the size based on ring mode */
	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
	if (sp->rxd_mode == RXD_MODE_1)
		size += NET_IP_ALIGN;
	else if (sp->rxd_mode == RXD_MODE_3B)
		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = config->rx_cfg[i].num_rxd /
			(rxd_count[sp->rxd_mode] +1);
		for (j = 0; j < blk_cnt; j++) {
			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
				rxdp = mac_control->rings[i].
					rx_blocks[j].rxds[k].virt_addr;
				if(sp->rxd_mode == RXD_MODE_3B)
					ba = &mac_control->rings[i].ba[j][k];
				if (set_rxd_buffer_pointer(sp, rxdp, ba,
					&skb,(u64 *)&temp0_64,
				set_rxd_buffer_size(sp, rxdp, size);
				/* flip the Ownership bit to Hardware */
				rxdp->Control_1 |= RXD_OWN_XENA;
/*
 * Register the driver's interrupt handler(s).  Tries MSI-X first when
 * configured (falling back to INTA if s2io_enable_msi_x() fails), then
 * requests one vector per in-use entry: s2io_msix_fifo_handle for Tx
 * FIFOs and s2io_msix_ring_handle for Rx rings.  For INTA a single
 * shared s2io_isr is registered on pdev->irq.
 * NOTE(review): the err/ret declarations, the else branches, error
 * returns and several closing braces are elided in this chunk.
 */
static int s2io_add_isr(struct s2io_nic * sp)
	struct net_device *dev = sp->dev;
	if (sp->intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
		/* MSI-X enable failed: fall back to legacy interrupts */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->intr_type = INTA;
	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);
	/* After proper initialization of H/W, register ISR */
	if (sp->intr_type == MSI_X) {
		int i, msix_tx_cnt=0,msix_rx_cnt=0;
		for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
				sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
				err = request_irq(sp->entries[i].vector,
					s2io_msix_fifo_handle, 0, sp->desc[i],
					sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						ntohl(sp->msix_info[i].data));
				sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
				err = request_irq(sp->entries[i].vector,
					s2io_msix_ring_handle, 0, sp->desc[i],
					sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						ntohl(sp->msix_info[i].data));
				DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
					"failed\n", dev->name, i);
				DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
		printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
		printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
	if (sp->intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
/*
 * Tear down the interrupt handlers registered by s2io_add_isr().
 * MSI-X: free every successfully registered vector, release the entry
 * tables, clear the MSI enable bit in config space (offset 0x42) and
 * disable MSI-X on the device.  INTA: free the single shared IRQ.
 * Finally spin until all in-flight handlers (isr_cnt) have drained.
 * NOTE(review): several locals (i, msi_control), the stats mem_freed
 * updates paired with the two kfree()s, and the wait-loop framing are
 * elided in this chunk.
 */
static void s2io_rem_isr(struct s2io_nic * sp)
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
	if (sp->intr_type == MSI_X) {
		for (i=1; (sp->s2io_entries[i].in_use ==
			MSIX_REGISTERED_SUCCESS); i++) {
			int vector = sp->entries[i].vector;
			void *arg = sp->s2io_entries[i].arg;
			free_irq(vector, arg);
			(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
		kfree(sp->s2io_entries);
			(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
		sp->s2io_entries = NULL;
		pci_read_config_word(sp->pdev, 0x42, &msi_control);
		msi_control &= 0xFFFE; /* Disable MSI */
		pci_write_config_word(sp->pdev, 0x42, msi_control);
		pci_disable_msix(sp->pdev);
		free_irq(sp->pdev->irq, dev);
	/* Waiting till all Interrupt handlers are complete */
		if (!atomic_read(&sp->isr_cnt))
/*
 * Shut the card down: stop the alarm timer, serialize against
 * s2io_set_link via bit 0 of link_state, mark CARD_DOWN, kill the
 * tasklet, re-arm the Rx rings for the hardware, then free all Tx/Rx
 * buffers under their respective locks.  @do_io selects whether the
 * register I/O portions are performed (0 is used from contexts where the
 * device must not be touched).
 * NOTE(review): the msleep in the wait loop, the stop_nic/s2io_rem_isr
 * calls, the reset and the quiescence else-branch framing are elided in
 * this chunk.
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	unsigned long flags;
	register u64 val64 = 0;
	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(0, &(sp->link_state))) {
	atomic_set(&sp->card_state, CARD_DOWN);
	/* disable Tx and Rx traffic on the NIC */
	tasklet_kill(&sp->task);
	/* Check if the device is Quiescent and then Reset the NIC */
	/* As per the HW requirement we need to replenish the
	 * receive buffer to avoid the ring bump. Since there is
	 * no intention of processing the Rx frame at this point we are
	 * just setting the ownership bit of rxd in each Rx
	 * ring to HW and set the appropriate buffer size
	 * based on the ring mode
	 */
	rxd_owner_bit_reset(sp);
	val64 = readq(&bar0->adapter_status);
	if (verify_xena_quiescence(sp)) {
		if(verify_pcc_quiescent(sp, sp->device_enabled_once))
		"s2io_close:Device not Quiescent ");
	DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
		(unsigned long long) val64);
	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Free all Tx buffers */
	free_tx_buffers(sp);
	spin_unlock_irqrestore(&sp->tx_lock, flags);
	/* Free all Rx buffers */
	spin_lock_irqsave(&sp->rx_lock, flags);
	free_rx_buffers(sp);
	spin_unlock_irqrestore(&sp->rx_lock, flags);
	clear_bit(0, &(sp->link_state));
/*
 * s2io_card_down - full card teardown including register I/O.
 * @sp: device private structure.
 *
 * Thin wrapper around do_s2io_card_down() with do_io == 1.
 */
static void s2io_card_down(struct s2io_nic *sp)
{
	do_s2io_card_down(sp, 1);
}
/*
 * Bring the card up: initialize the hardware registers, fill every Rx
 * ring with buffers, restore multicast/promiscuous state, size the LRO
 * aggregation limit from the MTU, start the NIC, register interrupt
 * handlers, arm the alarm timer and tasklet, and finally enable the
 * selected interrupts before marking the card CARD_UP.
 * NOTE(review): locals (i, ret, interruptible), error returns, the
 * m_cast_flg reset and several closing braces are elided in this chunk.
 */
static int s2io_card_up(struct s2io_nic * sp)
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	/* Initialize the H/W I/O registers */
	if (init_nic(sp) != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;
	for (i = 0; i < config->rx_ring_num; i++) {
		if ((ret = fill_rx_buffers(sp, i))) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
			free_rx_buffers(sp);
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			atomic_read(&sp->rx_bufs_left[i]));
	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->all_multi_pos= 0;
	/* Setting its receive mode */
	s2io_set_multicast(dev);
	/* Initialize max aggregatable pkts per session based on MTU */
	sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
	/* Check if we can use (if specified) user provided value */
	if (lro_max_pkts < sp->lro_max_aggr_per_sess)
		sp->lro_max_aggr_per_sess = lro_max_pkts;
	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		free_rx_buffers(sp);
	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		if (sp->intr_type == MSI_X)
		free_rx_buffers(sp);
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
	/* Enable tasklet for the device */
	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
	/* Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->intr_type != INTA)
		en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
	interruptible |= TX_PIC_INTR;
	en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	atomic_set(&sp->card_state, CARD_UP);
6981 * s2io_restart_nic - Resets the NIC.
6982 * @data : long pointer to the device private structure
6984 * This function is scheduled to be run by the s2io_tx_watchdog
6985 * function after 0.5 secs to reset the NIC. The idea is to reduce
6986 * the run time of the watch dog routine which is run holding a
/*
 * Work-queue handler scheduled by s2io_tx_watchdog() to reset the NIC
 * outside the watchdog's locked context: bring the card back up and
 * re-wake the Tx queue.
 * NOTE(review): the rtnl locking, the s2io_card_down() call before the
 * bring-up, the return on failure and closing braces are elided in this
 * chunk.
 */
static void s2io_restart_nic(struct work_struct *work)
	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
	struct net_device *dev = sp->dev;
	if (!netif_running(dev))
	if (s2io_card_up(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
	netif_wake_queue(dev);
	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7013 * s2io_tx_watchdog - Watchdog for transmit side.
7014 * @dev : Pointer to net device structure
7016 * This function is triggered if the Tx Queue is stopped
7017 * for a pre-defined amount of time when the Interface is still up.
7018 * If the Interface is jammed in such a situation, the hardware is
7019 * reset (by s2io_close) and restarted again (by s2io_open) to
7020 * overcome any problem that might have been caused in the hardware.
/*
 * Tx-timeout handler: when the carrier is up but the Tx queue has
 * stalled, count the event and schedule rst_timer_task, whose worker
 * (s2io_restart_nic) resets the adapter.
 * NOTE(review): closing braces are elided in this chunk.
 */
static void s2io_tx_watchdog(struct net_device *dev)
	struct s2io_nic *sp = dev->priv;
	if (netif_carrier_ok(dev)) {
		sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
		schedule_work(&sp->rst_timer_task);
		sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7037 * rx_osm_handler - To perform some OS related operations on SKB.
7038 * @sp: private member of the device structure,pointer to s2io_nic structure.
7039 * @skb : the socket buffer pointer.
7040 * @len : length of the packet
7041 * @cksum : FCS checksum of the frame.
7042 * @ring_no : the ring from which this RxD was extracted.
 * This function is called by the Rx interrupt service routine to perform
7045 * some OS related operations on the SKB before passing it to the upper
7046 * layers. It mainly checks if the checksum is OK, if so adds it to the
7047 * SKBs cksum variable, increments the Rx packet count and passes the SKB
7048 * to the upper layer. If the checksum is wrong, it increments the Rx
7049 * packet error count, frees the SKB and returns error.
7051 * SUCCESS on success and -1 on failure.
7053 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7055 struct s2io_nic *sp = ring_data->nic;
7056 struct net_device *dev = (struct net_device *) sp->dev;
/* The driver stashed the skb pointer in the RxD's Host_Control field. */
7057 struct sk_buff *skb = (struct sk_buff *)
7058 ((unsigned long) rxdp->Host_Control);
7059 int ring_no = ring_data->ring_no;
7060 u16 l3_csum, l4_csum;
/* Hardware transfer code for this descriptor (non-zero => error). */
7061 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7068 /* Check for parity error */
7070 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
/*
 * NOTE(review): the switch on the transfer code and most of its case
 * labels are elided from this dump; only the per-error statistic
 * increments below are visible.
 */
7072 err_mask = err >> 48;
7075 sp->mac_control.stats_info->sw_stat.
7076 rx_parity_err_cnt++;
7080 sp->mac_control.stats_info->sw_stat.
7085 sp->mac_control.stats_info->sw_stat.
7086 rx_parity_abort_cnt++;
7090 sp->mac_control.stats_info->sw_stat.
7095 sp->mac_control.stats_info->sw_stat.
7100 sp->mac_control.stats_info->sw_stat.
7105 sp->mac_control.stats_info->sw_stat.
7106 rx_buf_size_err_cnt++;
7110 sp->mac_control.stats_info->sw_stat.
7111 rx_rxd_corrupt_cnt++;
7115 sp->mac_control.stats_info->sw_stat.
7120 * Drop the packet if bad transfer code. Exception being
7121 * 0x5, which could be due to unsupported IPv6 extension header.
7122 * In this case, we let stack handle the packet.
7123 * Note that in this case, since checksum will be incorrect,
7124 * stack will validate the same.
7126 if (err_mask != 0x5) {
7127 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7128 dev->name, err_mask);
/* Count the drop and release this ring's buffer slot. */
7129 sp->stats.rx_crc_errors++;
7130 sp->mac_control.stats_info->sw_stat.mem_freed
7133 atomic_dec(&sp->rx_bufs_left[ring_no]);
7134 rxdp->Host_Control = 0;
7139 /* Updating statistics */
7140 sp->stats.rx_packets++;
7141 rxdp->Host_Control = 0;
/* Fix up the skb length(s) according to the receive buffer mode. */
7142 if (sp->rxd_mode == RXD_MODE_1) {
7143 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7145 sp->stats.rx_bytes += len;
/* 3B mode: header landed in buffer 0, payload in buffer 2. */
7148 } else if (sp->rxd_mode == RXD_MODE_3B) {
7149 int get_block = ring_data->rx_curr_get_info.block_index;
7150 int get_off = ring_data->rx_curr_get_info.offset;
7151 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7152 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7153 unsigned char *buff = skb_push(skb, buf0_len);
7155 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7156 sp->stats.rx_bytes += buf0_len + buf2_len;
/* Copy the header from the bounce buffer in front of the payload. */
7157 memcpy(buff, ba->ba_0, buf0_len);
7158 skb_put(skb, buf2_len);
/* Checksum/LRO path: only for non-fragmented TCP or UDP frames. */
7161 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
7162 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7164 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7165 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7166 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7168 * NIC verifies if the Checksum of the received
7169 * frame is Ok or not and accordingly returns
7170 * a flag in the RxD.
7172 skb->ip_summed = CHECKSUM_UNNECESSARY;
/* Ask the LRO engine how to handle this TCP segment. */
7178 ret = s2io_club_tcp_session(skb->data, &tcp,
7179 &tcp_len, &lro, rxdp, sp);
7181 case 3: /* Begin anew */
7184 case 1: /* Aggregate */
7186 lro_append_pkt(sp, lro,
7190 case 4: /* Flush session */
7192 lro_append_pkt(sp, lro,
/* Session reached its aggregation limit: push it up and recycle. */
7194 queue_rx_frame(lro->parent);
7195 clear_lro_session(lro);
7196 sp->mac_control.stats_info->
7197 sw_stat.flush_max_pkts++;
7200 case 2: /* Flush both */
7201 lro->parent->data_len =
7203 sp->mac_control.stats_info->
7204 sw_stat.sending_both++;
7205 queue_rx_frame(lro->parent);
7206 clear_lro_session(lro);
7208 case 0: /* sessions exceeded */
7209 case -1: /* non-TCP or not
7213 * First pkt in session not
7214 * L3/L4 aggregatable
7219 "%s: Samadhana!!\n",
7226 * Packet with erroneous checksum, let the
7227 * upper layers deal with it.
7229 skb->ip_summed = CHECKSUM_NONE;
7232 skb->ip_summed = CHECKSUM_NONE;
7234 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7236 skb->protocol = eth_type_trans(skb, dev);
/* VLAN-tagged frames go through the hw-accelerated VLAN receive path. */
7237 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
7239 /* Queueing the vlan frame to the upper layer */
7241 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
7242 RXD_GET_VLAN_TAG(rxdp->Control_2));
7244 vlan_hwaccel_rx(skb, sp->vlgrp,
7245 RXD_GET_VLAN_TAG(rxdp->Control_2));
7248 netif_receive_skb(skb);
7254 queue_rx_frame(skb);
7256 dev->last_rx = jiffies;
/* One fewer posted buffer outstanding on this ring. */
7258 atomic_dec(&sp->rx_bufs_left[ring_no]);
7263 * s2io_link - stops/starts the Tx queue.
7264 * @sp : private member of the device structure, which is a pointer to the
7265 * s2io_nic structure.
7266 * @link : indicates whether link is UP/DOWN.
7268 * This function stops/starts the Tx queue depending on whether the link
7269 * status of the NIC is down or up. This is called by the Alarm
7270 * interrupt handler whenever a link change interrupt comes up.
7275 static void s2io_link(struct s2io_nic * sp, int link)
7277 struct net_device *dev = (struct net_device *) sp->dev;
/* Act only on actual link-state transitions, not repeated reports. */
7279 if (link != sp->last_link_state) {
7280 if (link == LINK_DOWN) {
7281 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7282 netif_carrier_off(dev);
/* Record how long the link stayed up (start_time was set at the
 * previous transition), then count the down event. */
7283 if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7284 sp->mac_control.stats_info->sw_stat.link_up_time =
7285 jiffies - sp->start_time;
7286 sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7288 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
/* Symmetric bookkeeping for the up transition. */
7289 if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7290 sp->mac_control.stats_info->sw_stat.link_down_time =
7291 jiffies - sp->start_time;
7292 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7293 netif_carrier_on(dev);
/* Remember the new state and restart the duration timer. */
7296 sp->last_link_state = link;
7297 sp->start_time = jiffies;
7301 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7302 * @sp : private member of the device structure, which is a pointer to the
7303 * s2io_nic structure.
7305 * This function initializes a few of the PCI and PCI-X configuration registers
7306 * with recommended values.
7311 static void s2io_init_pci(struct s2io_nic * sp)
7313 u16 pci_cmd = 0, pcix_cmd = 0;
7315 /* Enable Data Parity Error Recovery in PCI-X command register. */
7316 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7318 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Read back so the written value can be confirmed/latched. */
7320 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7323 /* Set the PErr Response bit in PCI command register. */
7324 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7325 pci_write_config_word(sp->pdev, PCI_COMMAND,
7326 (pci_cmd | PCI_COMMAND_PARITY));
/* Final read-back of the PCI command register. */
7327 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
/*
 * s2io_verify_parm() - sanity-check module load parameters.
 * @pdev: the PCI device being probed (used to distinguish Xframe I/II).
 * @dev_intr_type: in/out requested interrupt type; forced to INTA whenever
 *                 the requested mode is unsupported.
 *
 * Clamps out-of-range tx_fifo_num/rx_ring_num/rx_ring_mode values to safe
 * defaults and downgrades MSI/MSI-X requests that the kernel or the adapter
 * (Xframe I) cannot honor.
 * NOTE(review): the clamping assignments and some #ifdef/#else lines are
 * elided from this dump.
 */
7330 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7332 if ( tx_fifo_num > 8) {
7333 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
7335 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
7338 if ( rx_ring_num > 8) {
7339 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7341 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7344 if (*dev_intr_type != INTA)
/* Without kernel MSI support, anything but INTA must fall back. */
7347 #ifndef CONFIG_PCI_MSI
7348 if (*dev_intr_type != INTA) {
7349 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
7350 "MSI/MSI-X. Defaulting to INTA\n");
7351 *dev_intr_type = INTA;
7354 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7355 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7356 "Defaulting to INTA\n");
7357 *dev_intr_type = INTA;
/* MSI-X is only available on Xframe II (Herc) devices. */
7360 if ((*dev_intr_type == MSI_X) &&
7361 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7362 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7363 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7364 "Defaulting to INTA\n");
7365 *dev_intr_type = INTA;
/* Only 1-buffer (1) and 2-buffer (2) ring modes are valid. */
7368 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7369 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7370 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7377 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7378 * or Traffic class respectively.
7379 * @nic: device private variable
7380 * Description: The function configures the receive steering to
7381 * desired receive ring.
7382 * Return Value: SUCCESS on success and
7383 * '-1' on failure (endian settings incorrect).
7385 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7387 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7388 register u64 val64 = 0;
/* DS codepoints occupy 6 bits; reject anything larger.
 * NOTE(review): the guard's body (the failure return) is elided here. */
7390 if (ds_codepoint > 63)
/* Program the target ring for this codepoint... */
7393 val64 = RTS_DS_MEM_DATA(ring);
7394 writeq(val64, &bar0->rts_ds_mem_data);
/* ...then issue the write-strobe command for that table offset. */
7396 val64 = RTS_DS_MEM_CTRL_WE |
7397 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7398 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7400 writeq(val64, &bar0->rts_ds_mem_ctrl);
/* Poll until the adapter clears the executing bit (or timeout). */
7402 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7403 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7408 * s2io_init_nic - Initialization of the adapter .
7409 * @pdev : structure containing the PCI related information of the device.
7410 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7412 * The function initializes an adapter identified by the pci_dev structure.
7413 * All OS related initialization including memory and device structure and
7414 * initialization of the device private variable is done. Also the swapper
7415 * control register is initialized to enable read and write into the I/O
7416 * registers of the device.
7418 * returns 0 on success and negative on failure.
7421 static int __devinit
7422 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7424 struct s2io_nic *sp;
7425 struct net_device *dev;
7427 int dma_flag = FALSE;
7428 u32 mac_up, mac_down;
7429 u64 val64 = 0, tmp64 = 0;
7430 struct XENA_dev_config __iomem *bar0 = NULL;
7432 struct mac_info *mac_control;
7433 struct config_param *config;
7435 u8 dev_intr_type = intr_type;
/* Validate module parameters before touching the hardware. */
7437 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
7440 if ((ret = pci_enable_device(pdev))) {
7442 "s2io_init_nic: pci_enable_device failed\n");
/* Prefer 64-bit DMA; fall back to 32-bit, else give up. */
7446 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7447 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7449 if (pci_set_consistent_dma_mask
7450 (pdev, DMA_64BIT_MASK)) {
7452 "Unable to obtain 64bit DMA for \
7453 consistent allocations\n");
7454 pci_disable_device(pdev);
7457 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7458 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7460 pci_disable_device(pdev);
7463 if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7464 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7465 pci_disable_device(pdev);
/* Allocate the net_device with room for the private structure. */
7469 dev = alloc_etherdev(sizeof(struct s2io_nic));
7471 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7472 pci_disable_device(pdev);
7473 pci_release_regions(pdev);
7477 pci_set_master(pdev);
7478 pci_set_drvdata(pdev, dev);
7479 SET_MODULE_OWNER(dev);
7480 SET_NETDEV_DEV(dev, &pdev->dev);
7482 /* Private member variable initialized to s2io NIC structure */
7484 memset(sp, 0, sizeof(struct s2io_nic));
7487 sp->high_dma_flag = dma_flag;
7488 sp->device_enabled_once = FALSE;
/* Translate the module-level ring mode into the RxD mode constant. */
7489 if (rx_ring_mode == 1)
7490 sp->rxd_mode = RXD_MODE_1;
7491 if (rx_ring_mode == 2)
7492 sp->rxd_mode = RXD_MODE_3B;
7494 sp->intr_type = dev_intr_type;
/* Herc device IDs identify Xframe II; everything else is Xframe I. */
7496 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7497 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7498 sp->device_type = XFRAME_II_DEVICE;
7500 sp->device_type = XFRAME_I_DEVICE;
7504 /* Initialize some PCI/PCI-X fields of the NIC. */
7508 * Setting the device configuration parameters.
7509 * Most of these parameters can be specified by the user during
7510 * module insertion as they are module loadable parameters. If
7511 * these parameters are not specified during load time, they
7512 * are initialized with default values.
7514 mac_control = &sp->mac_control;
7515 config = &sp->config;
7517 /* Tx side parameters. */
7518 config->tx_fifo_num = tx_fifo_num;
7519 for (i = 0; i < MAX_TX_FIFOS; i++) {
7520 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7521 config->tx_cfg[i].fifo_priority = i;
7524 /* mapping the QoS priority to the configured fifos */
7525 for (i = 0; i < MAX_TX_FIFOS; i++)
7526 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
7528 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7529 for (i = 0; i < config->tx_fifo_num; i++) {
7530 config->tx_cfg[i].f_no_snoop =
7531 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
/* Short FIFOs interrupt per-list rather than on utilization. */
7532 if (config->tx_cfg[i].fifo_len < 65) {
7533 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7537 /* + 2 because one Txd for skb->data and one Txd for UFO */
7538 config->max_txds = MAX_SKB_FRAGS + 2;
7540 /* Rx side parameters. */
7541 config->rx_ring_num = rx_ring_num;
7542 for (i = 0; i < MAX_RX_RINGS; i++) {
7543 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7544 (rxd_count[sp->rxd_mode] + 1);
7545 config->rx_cfg[i].ring_priority = i;
7548 for (i = 0; i < rx_ring_num; i++) {
7549 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7550 config->rx_cfg[i].f_no_snoop =
7551 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7554 /* Setting Mac Control parameters */
7555 mac_control->rmac_pause_time = rmac_pause_time;
7556 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7557 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7560 /* Initialize Ring buffer parameters. */
7561 for (i = 0; i < config->rx_ring_num; i++)
7562 atomic_set(&sp->rx_bufs_left[i], 0);
7564 /* Initialize the number of ISRs currently running */
7565 atomic_set(&sp->isr_cnt, 0);
7567 /* initialize the shared memory used by the NIC and the host */
7568 if (init_shared_mem(sp)) {
7569 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7572 goto mem_alloc_failed;
/* Map BAR0 (register space) and BAR1 (Tx FIFO doorbells). */
7575 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7576 pci_resource_len(pdev, 0));
7578 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7581 goto bar0_remap_failed;
7584 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7585 pci_resource_len(pdev, 2));
7587 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7590 goto bar1_remap_failed;
7593 dev->irq = pdev->irq;
7594 dev->base_addr = (unsigned long) sp->bar0;
7596 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7597 for (j = 0; j < MAX_TX_FIFOS; j++) {
7598 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7599 (sp->bar1 + (j * 0x00020000));
7602 /* Driver entry points */
7603 dev->open = &s2io_open;
7604 dev->stop = &s2io_close;
7605 dev->hard_start_xmit = &s2io_xmit;
7606 dev->get_stats = &s2io_get_stats;
7607 dev->set_multicast_list = &s2io_set_multicast;
7608 dev->do_ioctl = &s2io_ioctl;
7609 dev->change_mtu = &s2io_change_mtu;
7610 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7611 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7612 dev->vlan_rx_register = s2io_vlan_rx_register;
7615 * will use eth_mac_addr() for dev->set_mac_address
7616 * mac address will be set every time dev->open() is called
7618 netif_napi_add(dev, &sp->napi, s2io_poll, 32);
7620 #ifdef CONFIG_NET_POLL_CONTROLLER
7621 dev->poll_controller = s2io_netpoll;
7624 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7625 if (sp->high_dma_flag == TRUE)
7626 dev->features |= NETIF_F_HIGHDMA;
7627 dev->features |= NETIF_F_TSO;
7628 dev->features |= NETIF_F_TSO6;
/* UFO requires both Xframe II hardware and the module parameter. */
7629 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
7630 dev->features |= NETIF_F_UFO;
7631 dev->features |= NETIF_F_HW_CSUM;
7634 dev->tx_timeout = &s2io_tx_watchdog;
7635 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7636 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7637 INIT_WORK(&sp->set_link_task, s2io_set_link);
7639 pci_save_state(sp->pdev);
7641 /* Setting swapper control on the NIC, for proper reset operation */
7642 if (s2io_set_swapper(sp)) {
7643 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7646 goto set_swap_failed;
7649 /* Verify if the Herc works on the slot its placed into */
7650 if (sp->device_type & XFRAME_II_DEVICE) {
7651 mode = s2io_verify_pci_mode(sp);
7653 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7654 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7656 goto set_swap_failed;
7660 /* Not needed for Herc */
7661 if (sp->device_type & XFRAME_I_DEVICE) {
7663 * Fix for all "FFs" MAC address problems observed on
7666 fix_mac_address(sp);
7671 * MAC address initialization.
7672 * For now only one mac address will be read and used.
7675 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7676 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7677 writeq(val64, &bar0->rmac_addr_cmd_mem);
7678 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7679 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
7680 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7681 mac_down = (u32) tmp64;
7682 mac_up = (u32) (tmp64 >> 32);
/* Unpack the 48-bit MAC from the two 32-bit register halves. */
7684 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7685 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7686 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7687 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7688 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7689 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7691 /* Set the factory defined MAC address initially */
7692 dev->addr_len = ETH_ALEN;
7693 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7695 /* Store the values of the MSIX table in the s2io_nic structure */
7696 store_xmsi_data(sp);
7697 /* reset Nic and bring it to known state */
7701 * Initialize the tasklet status and link state flags
7702 * and the card state parameter
7704 atomic_set(&(sp->card_state), 0);
7705 sp->tasklet_status = 0;
7708 /* Initialize spinlocks */
7709 spin_lock_init(&sp->tx_lock);
7712 spin_lock_init(&sp->put_lock);
7713 spin_lock_init(&sp->rx_lock);
7716 * SXE-002: Configure link and activity LED to init state
7719 subid = sp->pdev->subsystem_device;
7720 if ((subid & 0xFF) >= 0x07) {
7721 val64 = readq(&bar0->gpio_control);
7722 val64 |= 0x0000800000000000ULL;
7723 writeq(val64, &bar0->gpio_control);
7724 val64 = 0x0411040400000000ULL;
7725 writeq(val64, (void __iomem *) bar0 + 0x2700);
7726 val64 = readq(&bar0->gpio_control);
7729 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7731 if (register_netdev(dev)) {
7732 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7734 goto register_failed;
/* Announce the adapter, driver version, MAC and configuration. */
7737 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
7738 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7739 sp->product_name, pdev->revision);
7740 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7741 s2io_driver_version);
7742 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7743 "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
7744 sp->def_mac_addr[0].mac_addr[0],
7745 sp->def_mac_addr[0].mac_addr[1],
7746 sp->def_mac_addr[0].mac_addr[2],
7747 sp->def_mac_addr[0].mac_addr[3],
7748 sp->def_mac_addr[0].mac_addr[4],
7749 sp->def_mac_addr[0].mac_addr[5]);
7750 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7751 if (sp->device_type & XFRAME_II_DEVICE) {
7752 mode = s2io_print_pci_mode(sp);
7754 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7756 unregister_netdev(dev);
7757 goto set_swap_failed;
7760 switch(sp->rxd_mode) {
7762 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7766 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7772 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7773 switch(sp->intr_type) {
7775 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7778 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7782 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7785 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7786 " enabled\n", dev->name);
7787 /* Initialize device name */
7788 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7790 /* Initialize bimodal Interrupts */
7791 sp->config.bimodal = bimodal;
7792 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7793 sp->config.bimodal = 0;
7794 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7799 * Make Link state as off at this point, when the Link change
7800 * interrupt comes the state will be automatically changed to
7803 netif_carrier_off(dev);
/* Error-unwind labels: release resources in reverse acquisition
 * order. NOTE(review): the label lines themselves are elided here. */
7814 free_shared_mem(sp);
7815 pci_disable_device(pdev);
7816 pci_release_regions(pdev);
7817 pci_set_drvdata(pdev, NULL);
7824 * s2io_rem_nic - Free the PCI device
7825 * @pdev: structure containing the PCI related information of the device.
7826 * Description: This function is called by the Pci subsystem to release a
7827 * PCI device and free up all resource held up by the device. This could
7828 * be in response to a Hot plug event or when the driver is to be removed
7832 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7834 struct net_device *dev =
7835 (struct net_device *) pci_get_drvdata(pdev);
7836 struct s2io_nic *sp;
/* Guard against a missing drvdata pointer (nothing to tear down). */
7839 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
/* Make sure no reset/link work items are still pending. */
7843 flush_scheduled_work();
7846 unregister_netdev(dev);
7848 free_shared_mem(sp);
/* Release PCI resources in reverse order of acquisition. */
7851 pci_release_regions(pdev);
7852 pci_set_drvdata(pdev, NULL);
7854 pci_disable_device(pdev);
7858 * s2io_starter - Entry point for the driver
7859 * Description: This function is the entry point for the driver. It verifies
7860 * the module loadable parameters and initializes PCI configuration space.
/* Module entry point: register the s2io PCI driver with the PCI core. */
7863 int __init s2io_starter(void)
7865 return pci_register_driver(&s2io_driver);
7869 * s2io_closer - Cleanup routine for the driver
7870 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
/* Module exit point: unregister the PCI driver and log completion. */
7873 static __exit void s2io_closer(void)
7875 pci_unregister_driver(&s2io_driver);
7876 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Wire the module load/unload hooks to the functions above. */
7879 module_init(s2io_starter);
7880 module_exit(s2io_closer);
/*
 * check_L2_lro_capable() - decide whether a frame's L2 framing allows LRO.
 * @buffer: start of the received frame.
 * @ip/@tcp: out-pointers set to the located IP and TCP headers.
 * @rxdp: receive descriptor whose Control_1 carries the frame type bits.
 *
 * Only TCP frames with DIX (Ethernet II) framing, with or without a VLAN
 * tag, are considered mergeable; LLC/SNAP framing is rejected.
 * NOTE(review): the switch statement around the case labels, the failure
 * returns, and the `ip_len <<= 2` scaling line are elided from this dump.
 */
7882 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7883 struct tcphdr **tcp, struct RxD_t *rxdp)
/* Bits 37-39 of Control_1 encode the L2 frame type. */
7886 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7888 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7889 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7895 * By default the VLAN field in the MAC is stripped by the card, if this
7896 * feature is turned off in rx_pa_cfg register, then the ip_off field
7897 * has to be shifted by a further 2 bytes
7900 case 0: /* DIX type */
7901 case 4: /* DIX type with VLAN */
7902 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7904 /* LLC, SNAP etc are considered non-mergeable */
/* Locate the IP header past the Ethernet header, then the TCP
 * header past the (variable-length) IP header. */
7909 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7910 ip_len = (u8)((*ip)->ihl);
7912 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
/*
 * check_for_socket_match() - does this segment belong to an LRO session?
 * Compares the 4-tuple (src/dst address, src/dst port) of the incoming
 * headers against the session's stored headers.
 * NOTE(review): the tcp parameter declaration and the return statements
 * are elided from this dump.
 */
7917 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7920 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7921 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7922 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7927 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7929 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
/*
 * initiate_new_session() - seed a fresh LRO session from its first segment.
 * Records the expected next sequence number, the current ack, the running
 * total length, and (for doff==8) the TCP timestamp option values.
 * NOTE(review): the assignments storing l2h/ip/tcp pointers and the
 * session bookkeeping lines are elided from this dump.
 */
7932 static void initiate_new_session(struct lro *lro, u8 *l2h,
7933 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7935 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
/* Next in-order segment must start right after this payload. */
7939 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7940 lro->tcp_ack = ntohl(tcp->ack_seq);
7942 lro->total_len = ntohs(ip->tot_len);
7945 * check if we saw TCP timestamp. Other consistency checks have
7946 * already been done.
/* doff == 8 means exactly one 12-byte option block: the timestamp. */
7948 if (tcp->doff == 8) {
7950 ptr = (u32 *)(tcp+1);
7952 lro->cur_tsval = *(ptr+1);
7953 lro->cur_tsecr = *(ptr+2);
/*
 * update_L3L4_header() - finalize the merged frame's IP and TCP headers.
 * Writes the aggregated total length into the IP header (recomputing its
 * checksum), refreshes the TCP ack/window from the latest segment, patches
 * the timestamp echo reply when the session uses timestamps, and updates
 * the aggregation statistics.
 */
7958 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7960 struct iphdr *ip = lro->iph;
7961 struct tcphdr *tcp = lro->tcph;
7963 struct stat_block *statinfo = sp->mac_control.stats_info;
7964 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7966 /* Update L3 header */
7967 ip->tot_len = htons(lro->total_len);
/* Recompute the IP header checksum over the patched header. */
7969 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7972 /* Update L4 header */
7973 tcp->ack_seq = lro->tcp_ack;
7974 tcp->window = lro->window;
7976 /* Update tsecr field if this session has timestamps enabled */
7978 u32 *ptr = (u32 *)(tcp + 1);
7979 *(ptr+2) = lro->cur_tsecr;
7982 /* Update counters required for calculation of
7983 * average no. of packets aggregated.
7985 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7986 statinfo->sw_stat.num_aggregations++;
/*
 * aggregate_new_rx() - fold one more in-order segment into an LRO session.
 * Extends the session's length bookkeeping, advances the expected next
 * sequence number, and carries over the latest ack/window and timestamp
 * values from this segment.
 */
7989 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7990 struct tcphdr *tcp, u32 l4_pyld)
7992 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7993 lro->total_len += l4_pyld;
7994 lro->frags_len += l4_pyld;
/* The next in-order segment must start after this one's payload. */
7995 lro->tcp_next_seq += l4_pyld;
7998 /* Update ack seq no. and window ad(from this pkt) in LRO object */
7999 lro->tcp_ack = tcp->ack_seq;
8000 lro->window = tcp->window;
8004 /* Update tsecr and tsval from this packet */
8005 ptr = (u32 *) (tcp + 1);
8006 lro->cur_tsval = *(ptr + 1);
8007 lro->cur_tsecr = *(ptr + 2);
/*
 * verify_l3_l4_lro_capable() - L3/L4-level aggregation eligibility check.
 * @l_lro: existing session for monotonic-timestamp checks, or NULL when
 *         testing the first packet of a prospective session.
 *
 * Rejects pure acks/runts, IP options, ECN CE-marked packets, any TCP
 * control flag other than ACK, and any TCP option layout other than a
 * single timestamp option (doff 5 or 8).
 * NOTE(review): the failure `return -1`/success return lines are elided
 * from this dump.
 */
8011 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8012 struct tcphdr *tcp, u32 tcp_pyld_len)
8016 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8018 if (!tcp_pyld_len) {
8019 /* Runt frame or a pure ack */
8023 if (ip->ihl != 5) /* IP has options */
8026 /* If we see CE codepoint in IP header, packet is not mergeable */
8027 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8030 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8031 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
8032 tcp->ece || tcp->cwr || !tcp->ack) {
8034 * Currently recognize only the ack control word and
8035 * any other control field being set would result in
8036 * flushing the LRO session
8042 * Allow only one TCP timestamp option. Don't aggregate if
8043 * any other options are detected.
8045 if (tcp->doff != 5 && tcp->doff != 8)
8048 if (tcp->doff == 8) {
/* Walk past any leading NOP padding to the timestamp option. */
8049 ptr = (u8 *)(tcp + 1);
8050 while (*ptr == TCPOPT_NOP)
8052 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8055 /* Ensure timestamp value increases monotonically */
8057 if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
8060 /* timestamp echo reply should be non-zero */
8061 if (*((u32 *)(ptr+6)) == 0)
/*
 * s2io_club_tcp_session() - LRO dispatcher for one received TCP segment.
 * Locates the IP/TCP headers, matches the segment against the per-adapter
 * session table (lro0_n), and returns an action code consumed by
 * rx_osm_handler: 1 aggregate, 2 flush both, 3 begin new session,
 * 4 flush session at max aggregation, 0 sessions exhausted, -1 not
 * aggregatable.
 * NOTE(review): the return-type line, several break/assignment lines and
 * the switch framing around the final ret handling are elided from this
 * dump.
 */
8069 s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
8070 struct RxD_t *rxdp, struct s2io_nic *sp)
8073 struct tcphdr *tcph;
/* L2-level eligibility also fills in the ip/tcp header pointers. */
8076 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8078 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
8079 ip->saddr, ip->daddr);
8084 tcph = (struct tcphdr *)*tcp;
8085 *tcp_len = get_l4_pyld_length(ip, tcph);
/* First pass: look for an active session matching this 4-tuple. */
8086 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8087 struct lro *l_lro = &sp->lro0_n[i];
8088 if (l_lro->in_use) {
8089 if (check_for_socket_match(l_lro, ip, tcph))
8091 /* Sock pair matched */
/* Out-of-order segment forces the session to be flushed. */
8094 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8095 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
8096 "0x%x, actual 0x%x\n", __FUNCTION__,
8097 (*lro)->tcp_next_seq,
8100 sp->mac_control.stats_info->
8101 sw_stat.outof_sequence_pkts++;
8106 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
8107 ret = 1; /* Aggregate */
8109 ret = 2; /* Flush both */
8115 /* Before searching for available LRO objects,
8116 * check if the pkt is L3/L4 aggregatable. If not
8117 * don't create new LRO session. Just send this
8120 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
/* Second pass: claim a free slot for a new session. */
8124 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8125 struct lro *l_lro = &sp->lro0_n[i];
8126 if (!(l_lro->in_use)) {
8128 ret = 3; /* Begin anew */
8134 if (ret == 0) { /* sessions exceeded */
8135 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
8143 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
8146 update_L3L4_header(sp, *lro);
8149 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
/* Hit the per-session aggregation cap: finalize and flush. */
8150 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8151 update_L3L4_header(sp, *lro);
8152 ret = 4; /* Flush the LRO */
8156 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
8164 static void clear_lro_session(struct lro *lro)
8166 static u16 lro_struct_size = sizeof(struct lro);
8168 memset(lro, 0, lro_struct_size);
/*
 * queue_rx_frame() - deliver a completed skb to the network stack.
 * Resolves the protocol from the Ethernet header, then hands the frame
 * up via netif_receive_skb().
 * NOTE(review): braces and any alternative delivery branch are elided
 * from this dump.
 */
8171 static void queue_rx_frame(struct sk_buff *skb)
8173 struct net_device *dev = skb->dev;
8175 skb->protocol = eth_type_trans(skb, dev);
8177 netif_receive_skb(skb);
/*
 * lro_append_pkt() - chain an aggregated segment onto the session's parent skb.
 * Trims the segment down to its TCP payload, links it onto the parent's
 * frag_list (or after the previous fragment), and updates the parent's
 * length/truesize accounting plus the clubbing statistics.
 * NOTE(review): the tcp_len parameter line and closing lines are elided
 * from this dump.
 */
8182 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8183 struct sk_buff *skb,
8186 struct sk_buff *first = lro->parent;
8188 first->len += tcp_len;
8189 first->data_len = lro->frags_len;
/* Strip everything before the TCP payload of this segment. */
8190 skb_pull(skb, (skb->len - tcp_len));
/* Append after the last fragment, or start the frag_list. */
8191 if (skb_shinfo(first)->frag_list)
8192 lro->last_frag->next = skb;
8194 skb_shinfo(first)->frag_list = skb;
8195 first->truesize += skb->truesize;
8196 lro->last_frag = skb;
8197 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8202 * s2io_io_error_detected - called when PCI error is detected
8203 * @pdev: Pointer to PCI device
8204 * @state: The current pci connection state
8206 * This function is called after a PCI bus error affecting
8207 * this device has been detected.
8209 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8210 pci_channel_state_t state)
8212 struct net_device *netdev = pci_get_drvdata(pdev);
8213 struct s2io_nic *sp = netdev->priv;
/* Stop the stack from using the device while recovery is pending. */
8215 netif_device_detach(netdev);
8217 if (netif_running(netdev)) {
8218 /* Bring down the card, while avoiding PCI I/O */
8219 do_s2io_card_down(sp, 0);
8221 pci_disable_device(pdev);
/* Ask the PCI error-recovery core to perform a slot reset next. */
8223 return PCI_ERS_RESULT_NEED_RESET;
8227 * s2io_io_slot_reset - called after the pci bus has been reset.
8228 * @pdev: Pointer to PCI device
8230 * Restart the card from scratch, as if from a cold-boot.
8231 * At this point, the card has experienced a hard reset,
8232 * followed by fixups by BIOS, and has its config space
8233 * set up identically to what it was at cold boot.
8235 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8237 struct net_device *netdev = pci_get_drvdata(pdev);
8238 struct s2io_nic *sp = netdev->priv;
/* Re-enable PCI I/O; if that fails recovery cannot proceed. */
8240 if (pci_enable_device(pdev)) {
8241 printk(KERN_ERR "s2io: "
8242 "Cannot re-enable PCI device after reset.\n");
8243 return PCI_ERS_RESULT_DISCONNECT;
8246 pci_set_master(pdev);
/* Device is usable again; the core will call the resume callback. */
8249 return PCI_ERS_RESULT_RECOVERED;
8253 * s2io_io_resume - called when traffic can start flowing again.
8254 * @pdev: Pointer to PCI device
8256 * This callback is called when the error recovery driver tells
8257 * us that it's OK to resume normal operation.
8259 static void s2io_io_resume(struct pci_dev *pdev)
8261 struct net_device *netdev = pci_get_drvdata(pdev);
8262 struct s2io_nic *sp = netdev->priv;
8264 if (netif_running(netdev)) {
8265 if (s2io_card_up(sp)) {
8266 printk(KERN_ERR "s2io: "
8267 "Can't bring device back up after reset.\n");
8271 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8273 printk(KERN_ERR "s2io: "
8274 "Can't resetore mac addr after reset.\n");
8279 netif_device_attach(netdev);
8280 netif_wake_queue(netdev);