###########################################################################
# Driver files
-FAMILYC = e1000_82575.c
-FAMILYH = e1000_82575.h
+FAMILYC = e1000_82575.c e1000_i210.c
+FAMILYH = e1000_82575.h e1000_i210.h
# core driver files
CFILES = igb_main.c $(FAMILYC) e1000_mac.c e1000_nvm.c e1000_phy.c \
e1000_manage.c igb_param.c igb_ethtool.c kcompat.c e1000_api.c \
- e1000_mbx.c igb_vmdq.c e1000_i210.c igb_sysfs.c igb_procfs.c \
- igb_ptp.c
+ e1000_mbx.c igb_vmdq.c igb_sysfs.c igb_procfs.c igb_ptp.c
HFILES = igb.h e1000_hw.h e1000_osdep.h e1000_defines.h e1000_mac.h \
e1000_nvm.h e1000_manage.h $(FAMILYH) kcompat.h e1000_regs.h \
- e1000_api.h igb_regtest.h e1000_mbx.h igb_vmdq.h e1000_i210.h
+ e1000_api.h igb_regtest.h e1000_mbx.h igb_vmdq.h
ifeq (,$(BUILD_KERNEL))
BUILD_KERNEL=$(shell uname -r)
endif
endif
endif
-else # ifeq ($(K_VERSION),2.6)
+else # ifeq (1,$(shell [ $(KVER_CODE) -ge 132352 ] && echo 1 || echo 0))
# Makefile for 2.4.x kernel
TARGET = $(DRIVER_NAME).o
default:
$(MAKE)
-endif # ifeq ($(K_VERSION),2.6)
+endif # ifeq (1,$(shell [ $(KVER_CODE) -ge 132352 ] && echo 1 || echo 0))
ifeq (,$(MANDIR))
# find the best place to install the man page
* 82575GB Gigabit Network Connection
* 82576 Gigabit Network Connection
* 82576 Quad Port Gigabit Mezzanine Adapter
+ * 82580 Gigabit Network Connection
+ * I350 Gigabit Network Connection
*/
#include "e1000_api.h"
static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
u16 *data);
static s32 e1000_reset_hw_82575(struct e1000_hw *hw);
-static s32 e1000_get_protected_blocks_82576(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *blocks,
- u16 *blocks_size, u32 block_type_mask,
- u16 *eeprom_buffer, u32 eeprom_size);
-static s32 e1000_get_protected_blocks_82580(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *blocks,
- u16 *blocks_size, u32 block_type_mask,
- u16 *eeprom_buffer, u32 eeprom_size);
static s32 e1000_reset_hw_82580(struct e1000_hw *hw);
static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw,
u32 offset, u16 *data);
}
}
+ /* Set phy->phy_addr and phy->id. */
ret_val = e1000_get_phy_id_82575(hw);
/* Verify phy id and set remaining function pointers */
switch (phy->id) {
- case M88E1145_E_PHY_ID:
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
case M88E1340M_E_PHY_ID:
nvm->ops.validate = e1000_validate_nvm_checksum_generic;
nvm->ops.update = e1000_update_nvm_checksum_generic;
nvm->ops.valid_led_default = e1000_valid_led_default_82575;
- switch (hw->mac.type) {
- case e1000_82576:
- nvm->ops.get_protected_blocks =
- e1000_get_protected_blocks_82576;
- break;
- case e1000_82580:
- case e1000_i350:
- nvm->ops.get_protected_blocks =
- e1000_get_protected_blocks_82580;
- break;
- default:
- break;
- }
/* override generic family function pointers for specific descendants */
switch (hw->mac.type) {
mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575;
/* check for link */
mac->ops.check_for_link = e1000_check_for_link_82575;
- /* receive address register setting */
- mac->ops.rar_set = e1000_rar_set_generic;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
/* configure collision distance */
mac->ops.config_collision_dist = e1000_config_collision_dist_82575;
/* multicast address update */
mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
- if (hw->mac.type == e1000_i350) {
+ if (mac->type == e1000_i350) {
/* writing VFTA */
mac->ops.write_vfta = e1000_write_vfta_i350;
/* clearing VFTA */
/* clearing VFTA */
mac->ops.clear_vfta = e1000_clear_vfta_generic;
}
+ if (hw->mac.type >= e1000_82580)
+ mac->ops.validate_mdi_setting =
+ e1000_validate_mdi_setting_crossover_generic;
/* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_generic;
/* blink LED */
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = E1000_SUCCESS;
- u16 data;
+ u32 data;
DEBUGFUNC("e1000_set_d0_lplu_state_82580");
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = E1000_SUCCESS;
- u16 data;
+ u32 data;
DEBUGFUNC("e1000_set_d3_lplu_state_82580");
DEBUGOUT("MNG configuration cycle has not completed.\n");
/* If EEPROM is not marked present, init the PHY manually */
- if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
(hw->phy.type == e1000_phy_igp_3))
e1000_phy_init_script_igp3(hw);
*/
hw->mac.get_link_status = !hw->mac.serdes_has_link;
+ /*
+ * Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+ DEBUGOUT("Error configuring flow control\n");
} else {
ret_val = e1000_check_for_copper_link_generic(hw);
}
DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
- /* Set up defaults for the return values of this function */
- mac->serdes_has_link = false;
- *speed = 0;
- *duplex = 0;
-
/*
* Read the PCS Status register for link state. For non-copper mode,
* the status register is not accurate. The PCS status register is
*duplex = FULL_DUPLEX;
else
*duplex = HALF_DUPLEX;
+ } else {
+ mac->serdes_has_link = false;
+ *speed = 0;
+ *duplex = 0;
}
return E1000_SUCCESS;
}
/* If EEPROM is not present, run manual init scripts */
- if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES))
e1000_reset_init_script_82575(hw);
/* Clear any pending interrupt events. */
{
u32 ctrl;
s32 ret_val;
+ u32 phpm_reg;
DEBUGFUNC("e1000_setup_copper_link_82575");
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ /* Clear Go Link Disconnect bit */
+ if (hw->mac.type >= e1000_82580) {
+ phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+ phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ }
ret_val = e1000_setup_serdes_link_82575(hw);
if (ret_val)
goto out;
**/
static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
{
- u32 ctrl_ext, ctrl_reg, reg;
+ u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
bool pcs_autoneg;
s32 ret_val = E1000_SUCCESS;
u16 data;
reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
- /*
- * We force flow control to prevent the CTRL register values from being
- * overwritten by the autonegotiated flow control values
- */
- reg |= E1000_PCS_LCTL_FORCE_FCTRL;
-
if (pcs_autoneg) {
/* Set PCS register for autoneg */
reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
+ /* Disable force flow control for autoneg */
+ reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
+
+ /* Configure flow control advertisement for autoneg */
+ anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
+ anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
+
+ switch (hw->fc.requested_mode) {
+ case e1000_fc_full:
+ case e1000_fc_rx_pause:
+ anadv_reg |= E1000_TXCW_ASM_DIR;
+ anadv_reg |= E1000_TXCW_PAUSE;
+ break;
+ case e1000_fc_tx_pause:
+ anadv_reg |= E1000_TXCW_ASM_DIR;
+ break;
+ default:
+ break;
+ }
+
+ E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg);
DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
} else {
/* Set PCS register for forced link */
reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
+ /* Force flow control for forced link */
+ reg |= E1000_PCS_LCTL_FORCE_FCTRL;
DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
}
E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
- if (!e1000_sgmii_active_82575(hw))
+ if (!pcs_autoneg && !e1000_sgmii_active_82575(hw))
e1000_force_mac_fc_generic(hw);
return ret_val;
}
/* Read Init Control Word #3*/
hw->nvm.ops.read(hw, init_ctrl_wd_3_offset, 1, &init_ctrl_wd_3);
- current_link_mode = init_ctrl_wd_3;
- /*
- * Switch to CSR for all but internal PHY.
- */
- if ((init_ctrl_wd_3 << (E1000_CTRL_EXT_LINK_MODE_OFFSET -
- init_ctrl_wd_3_bit_offset)) !=
- E1000_CTRL_EXT_LINK_MODE_GMII) {
- current_link_mode = ctrl_ext;
- init_ctrl_wd_3_bit_offset =
- E1000_CTRL_EXT_LINK_MODE_OFFSET;
- }
- } else {
- /* Take link mode from CSR */
- current_link_mode = ctrl_ext;
- init_ctrl_wd_3_bit_offset = E1000_CTRL_EXT_LINK_MODE_OFFSET;
- }
/*
* Align link mode bits to
* their CTRL_EXT location.
*/
+ current_link_mode = init_ctrl_wd_3;
current_link_mode <<= (E1000_CTRL_EXT_LINK_MODE_OFFSET -
init_ctrl_wd_3_bit_offset);
current_link_mode &= E1000_CTRL_EXT_LINK_MODE_MASK;
+ /*
+ * Switch to CSR for all but internal PHY.
+ */
+ if (current_link_mode != E1000_CTRL_EXT_LINK_MODE_GMII)
+ /* Take link mode from CSR */
+ current_link_mode = ctrl_ext &
+ E1000_CTRL_EXT_LINK_MODE_MASK;
+ } else {
+ /* Take link mode from CSR */
+ current_link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
+ }
switch (current_link_mode) {
case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
goto out;
if (hw->phy.media_type ==
e1000_media_type_internal_serdes) {
+ /* Keep Link Mode as SGMII for 100BaseFX */
+ if (!dev_spec->eth_flags.e100_base_fx) {
current_link_mode =
E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ }
} else if (hw->phy.media_type ==
e1000_media_type_copper) {
current_link_mode =
s32 ret_val = E1000_ERR_CONFIG;
u32 ctrl_ext = 0;
struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
- struct sfp_e1000_flags eth_flags = {0};
+ struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags;
u8 tranceiver_type = 0;
+ s32 timeout = 3;
- /* Turn I2C interface ON */
+ /* Turn I2C interface ON and power on sfp cage */
ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
+ E1000_WRITE_FLUSH(hw);
/* Read SFP module data */
+ while (timeout) {
ret_val = e1000_read_sfp_data_byte(hw,
E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
&tranceiver_type);
+ if (ret_val == E1000_SUCCESS)
+ break;
+ msec_delay(100);
+ timeout--;
+ }
if (ret_val != E1000_SUCCESS)
goto out;
ret_val = e1000_read_sfp_data_byte(hw,
E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
- (u8 *)ð_flags);
+ (u8 *)eth_flags);
if (ret_val != E1000_SUCCESS)
goto out;
/*
if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
(tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
dev_spec->module_plugged = true;
- if (eth_flags.e1000_base_lx || eth_flags.e1000_base_sx) {
+ if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ } else if (eth_flags->e100_base_fx) {
+ dev_spec->sgmii_active = true;
hw->phy.media_type = e1000_media_type_internal_serdes;
- } else if (eth_flags.e1000_base_t) {
+ } else if (eth_flags->e1000_base_t) {
dev_spec->sgmii_active = true;
hw->phy.media_type = e1000_media_type_copper;
} else {
}
/* If EEPROM is not present, run manual init scripts */
- if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES))
e1000_reset_init_script_82575(hw);
/* clear global device reset status bit */
goto out;
}
- if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
+ if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) {
/* set compatibility bit to validate checksums appropriately */
nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
E1000_EEER_LPI_FC);
+ /* keep the LPI clock running before EEE is enabled */
+ if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
+ u32 eee_su;
+ eee_su = E1000_READ_REG(hw, E1000_EEE_SU);
+ eee_su &= ~E1000_EEE_SU_LPI_CLK_STP;
+ E1000_WRITE_REG(hw, E1000_EEE_SU, eee_su);
+ }
} else {
ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
u32 retry = 1;
u16 swfw_mask = 0;
- bool nack = 1;
+ bool nack = true;
DEBUGFUNC("e1000_read_i2c_byte_generic");
u32 i = 0;
u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
u32 timeout = 10;
- bool ack = 1;
+ bool ack = true;
DEBUGFUNC("e1000_get_i2c_ack");
return E1000_ERR_I2C;
ack = e1000_get_i2c_data(&i2cctl);
- if (ack == 1) {
+ if (ack) {
DEBUGOUT("I2C ack was not received.\n");
status = E1000_ERR_I2C;
}
}
return status;
}
-
-/**
- * e1000_get_protected_blocks_82576 - Get the list of protected EEPROM words
- * @hw: pointer to hardware structure
- * @blocks: buffer to contain the list of protected words
- * @blocks_size: size of the blocks buffer
- * @block_type_mask: any combination of ixgbe_eeprom_block_type types.
- *
- * This function reads masked list of protected EEPROM blocks from
- * protected_blocks_82575 list. If words is set to NULL the function return
- * the size of blocks buffer required to hold masked list of EEPROM blocks
- **/
-static s32 e1000_get_protected_blocks_82576(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *blocks,
- u16 *blocks_size, u32 block_type_mask,
- u16 *eeprom_buffer, u32 eeprom_size)
-{
- struct e1000_nvm_protected_block protected_blocks_82576[] = {
- {0x0000, e1000_block_mac_address, false, 0x00, 0x03, 0xFFFF,
- NULL},
- {0x0014, e1000_block_wol_config, false, 0x00, 0x01, 0x0480,
- NULL},
- {0x0024, e1000_block_wol_config, false, 0x00, 0x01, 0x0480,
- NULL},
- {0x0030, e1000_block_preboot_data, false, 0x00, 0x07, 0xFFFF,
- NULL},
- {0x0038, e1000_block_preboot_data, false, 0x00, 0x04, 0xFFFF,
- NULL},
- {0x0037, e1000_block_alt_mac_address, true, 0x00, 0x03,
- 0xFFFF, NULL},
- {0x003D, e1000_block_iscsi_boot_config, true, 0x00, 0x00,
- 0xFFFF, NULL},
- {0x0014, e1000_block_flash_config, false, 0x00, 0x01, 0x2000,
- NULL},
- {0x0024, e1000_block_flash_config, false, 0x00, 0x01, 0x2000,
- NULL},
- {0x000F, e1000_block_flash_config, false, 0x00, 0x01, 0x0700,
- NULL},
- };
- s32 status;
- u16 table_size;
-
- DEBUGFUNC("e1000_get_protected_blocks_82576");
-
- status = -E1000_ERR_INVALID_ARGUMENT;
- if (blocks_size) {
- table_size = sizeof(protected_blocks_82576) /
- sizeof(protected_blocks_82576[0]);
-
- status = e1000_get_protected_blocks_from_table(hw,
- protected_blocks_82576, table_size, blocks,
- blocks_size, block_type_mask, eeprom_buffer,
- eeprom_size);
- }
-
- return status;
-}
-
-/**
- * e1000_get_protected_blocks_82580 - Get the list of protected EEPROM words
- * @hw: pointer to hardware structure
- * @blocks: buffer to contain the list of protected words
- * @blocks_size: size of the blocks buffer
- * @block_type_mask: any combination of ixgbe_eeprom_block_type types.
- *
- * This function reads masked list of protected EEPROM blocks from
- * protected_blocks_82575 list. If words is set to NULL the function return
- * the size of blocks buffer required to hold masked list of EEPROM blocks
- **/
-static s32 e1000_get_protected_blocks_82580(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *blocks,
- u16 *blocks_size, u32 block_type_mask,
- u16 *eeprom_buffer, u32 eeprom_size)
-{
- struct e1000_nvm_protected_block protected_blocks_82580[] = {
- {0x0000, e1000_block_mac_address, false, 0x00, 0x03, 0xFFFF,
- NULL},
- {0x0080, e1000_block_mac_address, false, 0x00, 0x03, 0xFFFF,
- NULL},
- {0x00C0, e1000_block_mac_address, false, 0x00, 0x03, 0xFFFF,
- NULL},
- {0x0100, e1000_block_mac_address, false, 0x00, 0x03, 0xFFFF,
- NULL},
- {0x0024, e1000_block_wol_config, false, 0x00, 0x01, 0x0480,
- NULL},
- {0x00A4, e1000_block_wol_config, false, 0x00, 0x01, 0x0480,
- NULL},
- {0x00E4, e1000_block_wol_config, false, 0x00, 0x01, 0x0480,
- NULL},
- {0x0124, e1000_block_wol_config, false, 0x00, 0x01, 0x0480,
- NULL},
- {0x0030, e1000_block_preboot_data, false, 0x00, 0x07, 0xFFFF,
- NULL},
- {0x0038, e1000_block_preboot_data, false, 0x00, 0x04, 0xFFFF,
- NULL},
- {0x0037, e1000_block_alt_mac_address, true, 0x00, 0x0B, 0xFFFF,
- NULL},
- {0x003D, e1000_block_iscsi_boot_config, true, 0x00, 0x00,
- 0xFFFF, NULL},
- {0x000F, e1000_block_flash_config, false, 0x00, 0x01, 0x0720,
- NULL},
- };
- s32 status;
- u16 table_size;
-
- DEBUGFUNC("e1000_get_protected_blocks_82580");
-
- status = -E1000_ERR_INVALID_ARGUMENT;
- if (blocks_size) {
- table_size = sizeof(protected_blocks_82580) /
- sizeof(protected_blocks_82580[0]);
-
- status = e1000_get_protected_blocks_from_table(hw,
- protected_blocks_82580, table_size, blocks,
- blocks_size, block_type_mask, eeprom_buffer,
- eeprom_size);
- }
-
- return status;
-}
case E1000_DEV_ID_I350_DA4:
mac->type = e1000_i350;
break;
+#if defined(QV_RELEASE) && defined(SPRINGVILLE_FLASHLESS_HW)
+ case E1000_DEV_ID_I210_NVMLESS:
+#endif /* QV_RELEASE && SPRINGVILLE_FLASHLESS_HW */
case E1000_DEV_ID_I210_COPPER:
case E1000_DEV_ID_I210_COPPER_OEM1:
case E1000_DEV_ID_I210_COPPER_IT:
return hw->nvm.ops.write(hw, offset, words, data);
return E1000_SUCCESS;
-}
-
-/**
- * e1000_get_protected_block_size - Get the size of protected EEPROM block
- * @hw: pointer to hardware structure
- * @block: pointer to the protected block structure describing our block
- * @eeprom_buffer: pointer to eeprom image buffer
- * @eeprom_buffer_size: size of eeprom_buffer
- *
- * This function reads the size of protected EEPROM block from the EEPROM
- * content or the eeprom_buffer (if provided)
- **/
-s32 e1000_get_protected_block_size(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *block,
- u16 *eeprom_buffer, u32 eeprom_buffer_size)
-{
- if (hw->nvm.ops.get_protected_block_size)
- return hw->nvm.ops.get_protected_block_size(hw, block,
- eeprom_buffer, eeprom_buffer_size);
- return -E1000_ERR_CONFIG;
-}
-
-/**
- * e1000_get_protected_blocks - Get the list of protected EEPROM words
- * @hw: pointer to hardware structure
- * @blocks: buffer to contain the list of protected words
- * @blocks_number: size of the words buffer
- * @block_type_mask: any combination of e1000_nvm_block_type values
- * @eeprom_buffer: pointer to eeprom image buffer
- * @eeprom_buffer_size: size of eeprom_buffer
- *
- * This function reads masked list of protected EEPROM blocks from device
- * specific e1000_nvm_protected_block list. If words is set to NULL the
- * function returns the size of buffer required to hold masked list of EEPROM
- * blocks. If eeprom_buffer is not specified the function will read data from
- * onboard EEPROM.
- **/
-s32 e1000_get_protected_blocks(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *blocks,
- u16 *blocks_number, u32 block_type_mask,
- u16 *eeprom_buffer, u32 eeprom_buffer_size)
-{
- if (hw->nvm.ops.get_protected_blocks)
- return hw->nvm.ops.get_protected_blocks(hw, blocks,
- blocks_number, block_type_mask,
- eeprom_buffer, eeprom_buffer_size);
- return -E1000_ERR_CONFIG;
-}
-/**
- * e1000_read_protected_blocks - Read EEPROM protected blocks
- * @hw: pointer to hardware structure
- * @blocks: pointer to the protected blocks to read
- * @blocks_number: number of blocks to read
- * @eeprom_buffer: pointer to eeprom image buffer
- * @eeprom_buffer_size: size of eeprom_buffer
- *
- * This function reads the content of EEPROM protected blocks from
- * eeprom_buffer (if provided) or onboard EEPROM.
- **/
-s32 e1000_read_protected_blocks(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *blocks,
- u16 blocks_number, u16 *eeprom_buffer,
- u32 eeprom_buffer_size)
-{
- if (hw->nvm.ops.read_protected_blocks)
- return hw->nvm.ops.read_protected_blocks(hw, blocks,
- blocks_number, eeprom_buffer,
- eeprom_buffer_size);
- return -E1000_ERR_CONFIG;
-}
-
-/**
- * e1000_write_protected_blocks - Read EEPROM protected blocks
- * @hw: pointer to hardware structure
- * @blocks: pointer to the protected blocks to write
- * @blocks_number: number of blocks to write
- * @eeprom_buffer: pointer to eeprom image buffer
- * @eeprom_buffer_size: size of eeprom_buffer
- *
- * This function writes the content of EEPROM protected blocks from
- * eeprom_buffer (if provided) or onboard EEPROM.
- **/
-s32 e1000_write_protected_blocks(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *blocks,
- u16 blocks_number, u16 *eeprom_buffer,
- u32 eeprom_buffer_size)
-{
- if (hw->nvm.ops.write_protected_blocks)
- return hw->nvm.ops.write_protected_blocks(hw, blocks,
- blocks_number, eeprom_buffer,
- eeprom_buffer_size);
- return -E1000_ERR_CONFIG;
}
/**
extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
extern void e1000_init_function_pointers_i210(struct e1000_hw *hw);
+s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr);
s32 e1000_set_mac_type(struct e1000_hw *hw);
s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
s32 e1000_init_mac_params(struct e1000_hw *hw);
s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw);
-s32 e1000_get_protected_block_size(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *block,
- u16 *eeprom_buffer, u32 eeprom_buffer_size);
-s32 e1000_get_protected_blocks(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *blocks,
- u16 *blocks_number, u32 block_type_mask,
- u16 *eeprom_buffer, u32 eeprom_buffer_size);
-s32 e1000_read_protected_blocks(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *blocks,
- u16 blocks_number, u16 *eeprom_buffer,
- u32 eeprom_buffer_size);
-s32 e1000_write_protected_blocks(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *blocks,
- u16 blocks_number, u16 *eeprom_buffer,
- u32 eeprom_buffer_size);
-
/*
* TBI_ACCEPT macro definition:
*
(((length) > min_frame_size) && \
((length) <= (max_frame_size + VLAN_TAG_SIZE + 1)))))
+#ifndef E1000_MAX
+#define E1000_MAX(a, b) ((a) > (b) ? (a) : (b))
+#endif
+#ifndef E1000_DIVIDE_ROUND_UP
+#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */
#endif
+#endif /* _E1000_API_H_ */
#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */
#define E1000_RXD_SPC_CFI_SHIFT 12
+#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */
#define E1000_RXDEXT_STATERR_LB 0x00040000
#define E1000_RXDEXT_STATERR_CE 0x01000000
#define E1000_RXDEXT_STATERR_SE 0x02000000
E1000_RXDEXT_STATERR_CXE | \
E1000_RXDEXT_STATERR_RXE)
+/* Packet Types as indicated in the Adv/Ext receive descriptor. */
+#define E1000_RXD_PKTTYPE_MASK 0x000F0000
+#define E1000_RXD_PKTTYPE_PTP 0x000E0000
#define E1000_MRQC_ENABLE_MASK 0x00000007
#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001
#define E1000_MRQC_ENABLE_RSS_INT 0x00000004
#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
/* Extended desc bits for Linksec and timesync */
+#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
/* Transmit Control */
#define E1000_TCTL_RST 0x00000001 /* software reset */
#define E1000_PBA_48K 0x0030 /* 48KB */
#define E1000_PBA_64K 0x0040 /* 64KB */
-#define E1000_PBA_RXA_MASK 0xFFFF;
+#define E1000_PBA_RXA_MASK 0xFFFF
#define E1000_PBS_16K E1000_PBA_16K
#define E1000_PBS_24K E1000_PBA_24K
#define E1000_ICR_SRPD 0x00010000
#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
#define E1000_ICR_MNG 0x00040000 /* Manageability event */
-#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */
#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
+#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */
#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
/* If this bit asserted, the driver should claim the interrupt */
#define E1000_ICR_INT_ASSERTED 0x80000000
#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
-#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */
#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
#define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */
#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */
#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */
#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */
#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
/* Q0 Rx desc FIFO parity error */
#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0
#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
+#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
#define E1000_TIMINCA_16NS_SHIFT 24
+#define E1000_TIMINCA_INCPERIOD_SHIFT 24
+#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF
#define E1000_TSICR_TXTS 0x00000002
#define E1000_TSIM_TXTS 0x00000002
#define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */
#define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */
+#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */
/* PCI Express Control */
#define E1000_GCR_RXD_NO_SNOOP 0x00000001
#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
+#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */
#define E1000_FLUDONE_ATTEMPTS 20000
#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
#define E1000_I210_FIFO_SEL_RX 0x00
#define NVM_VERSION 0x0005
#define NVM_SERDES_AMPLITUDE 0x0006 /* SERDES output amplitude */
#define NVM_PHY_CLASS_WORD 0x0007
+#define NVM_ETRACK_WORD 0x0042
+#define NVM_COMB_VER_OFF 0x0083
+#define NVM_COMB_VER_PTR 0x003d
+
+/* NVM version defines */
+#define NVM_MAJOR_MASK 0xF000
+#define NVM_MINOR_MASK 0x000F
+#define NVM_COMB_VER_MASK 0x00FF
+#define NVM_MAJOR_SHIFT 12
+#define NVM_COMB_VER_SHFT 8
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETRACK_SHIFT 16
#define NVM_MAC_ADDR 0x0000
#define NVM_SUB_DEV_ID 0x000B
#define M88E1000_I_PHY_ID 0x01410C30
#define M88E1011_I_PHY_ID 0x01410C20
#define IGP01E1000_I_PHY_ID 0x02A80380
-#define M88E1145_E_PHY_ID 0x01410CD0
#define M88E1011_I_REV_4 0x04
#define M88E1111_I_PHY_ID 0x01410CC0
#define M88E1112_E_PHY_ID 0x01410C90
/* Lx power decision based on DMA coal */
#define E1000_PCIEMISC_LX_DECISION 0x00000080
+#define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
#define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */
#define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */
-/* Timestamp in Rx buffer */
-#define E1000_RXPBS_CFG_TS_EN 0x80000000
-
/* Proxy Filer Control */
#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */
#define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */
#define E1000_DEV_ID_I350_SERDES 0x1523
#define E1000_DEV_ID_I350_SGMII 0x1524
#define E1000_DEV_ID_I350_DA4 0x1546
+#if defined(QV_RELEASE) && defined(SPRINGVILLE_FLASHLESS_HW)
+#define E1000_DEV_ID_I210_NVMLESS 0x1531
+#endif /* QV_RELEASE && SPRINGVILLE_FLASHLESS_HW */
#define E1000_DEV_ID_I210_COPPER 0x1533
#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534
#define E1000_DEV_ID_I210_COPPER_IT 0x1535
u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
};
-enum e1000_nvm_block_type {
- e1000_block_undefined = 0x00000000,
- e1000_block_mac_address = 0x00000001,
- e1000_block_alt_mac_address = 0x00000002,
- e1000_block_fcoe_config = 0x00000004,
- e1000_block_wol_config = 0x00000008,
- e1000_block_pointer = 0x00000010,
- e1000_block_general_config = 0x00000020,
- e1000_block_preboot_data = 0x00000040,
- e1000_block_iscsi_boot_config = 0x00000080,
- e1000_block_flash_config = 0x00000100,
- e1000_type_lan_core_module = 0x00000200,
- e1000_type_san_mac = 0x00000400,
- e1000_type_alt_san_mac = 0x00000800,
- e1000_block_user = 0x80000000,
- e1000_block_all = 0xFFFFFFFF,
-};
-
-struct e1000_nvm_protected_block {
- u32 word_address;
- enum e1000_nvm_block_type block_type;
- bool pointer;
- u32 pointed_word_offset;
- u32 block_size;
- u16 word_mask;
- u16 *buffer;
-};
-
-#define E1000_ISCSI_BLOCK_SIZE_WORD_OFFSET 0x01
#include "e1000_mac.h"
#include "e1000_phy.h"
s32 (*valid_led_default)(struct e1000_hw *, u16 *);
s32 (*validate)(struct e1000_hw *);
s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
- s32 (*get_protected_block_size)(struct e1000_hw *,
- struct e1000_nvm_protected_block *, u16 *, u32);
- s32 (*get_protected_blocks)(struct e1000_hw *,
- struct e1000_nvm_protected_block *, u16 *, u32, u16 *, u32);
- s32 (*read_protected_blocks)(struct e1000_hw *,
- struct e1000_nvm_protected_block *, u16, u16 *, u32);
- s32 (*write_protected_blocks)(struct e1000_hw *,
- struct e1000_nvm_protected_block *, u16, u16 *, u32);
};
#define E1000_MAX_SENSORS 3
enum e1000_serdes_link_state serdes_link_state;
bool serdes_has_link;
bool tx_pkt_filtering;
- u32 max_frame_size;
struct e1000_thermal_sensor_data thermal_sensor_data;
};
bool eee_disable;
bool module_plugged;
u32 mtu;
+ struct sfp_e1000_flags eth_flags;
};
struct e1000_dev_spec_vf {
if (ret_val != E1000_SUCCESS)
DEBUGOUT("MAC Addr not found in iNVM\n");
break;
- case NVM_ID_LED_SETTINGS:
case NVM_INIT_CTRL_2:
+ ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_INIT_CTRL_2_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
case NVM_INIT_CTRL_4:
+ ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_INIT_CTRL_4_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
case NVM_LED_1_CFG:
+ ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_LED_1_CFG_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
case NVM_LED_0_2_CFG:
- e1000_read_invm_i211(hw, (u8)offset, data);
+ ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_LED_0_2_CFG_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
break;
- case NVM_COMPAT:
- *data = ID_LED_DEFAULT_I210;
+ case NVM_ID_LED_SETTINGS:
+ ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = ID_LED_RESERVED_FFFF;
+ ret_val = E1000_SUCCESS;
+ }
break;
case NVM_SUB_DEV_ID:
*data = hw->subsystem_device_id;
}
/**
+ * e1000_read_invm_version - Reads iNVM version and image type
+ * @hw: pointer to the HW structure
+ * @invm_ver: version structure for the version read
+ *
+ * Reads iNVM version and image type.
+ * Returns E1000_SUCCESS when a version record (and, in the second pass,
+ * an image-type record) could be decoded from the iNVM contents, otherwise
+ * -E1000_ERR_INVM_VALUE_NOT_FOUND.
+ **/
+s32 e1000_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver)
+{
+ u32 *record = NULL;
+ u32 *next_record = NULL;
+ u32 i = 0;
+ u32 invm_dword = 0;
+ /*
+ * Number of iNVM records scanned for version data; the trailing
+ * E1000_INVM_ULT_BYTES_SIZE bytes are excluded from the scan.
+ * NOTE(review): assumes that area never holds version records —
+ * confirm against the I210 iNVM layout.
+ */
+ u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
+ E1000_INVM_RECORD_SIZE_IN_BYTES);
+ u32 buffer[E1000_INVM_SIZE];
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u16 version = 0;
+
+ DEBUGFUNC("e1000_read_invm_version");
+
+ /* Read iNVM memory into a local buffer, one dword per record */
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
+ buffer[i] = invm_dword;
+ }
+
+ /* Read version number (records are scanned from the end backwards) */
+ for (i = 1; i < invm_blocks; i++) {
+ record = &buffer[invm_blocks - i];
+ next_record = &buffer[invm_blocks - i + 1];
+
+ /* Check if we have first version location used */
+ if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
+ version = 0;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have second version location used */
+ else if ((i == 1) &&
+ ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /*
+ * Check if we have odd version location
+ * used and it is the last one used
+ */
+ else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
+ ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
+ (i != 1))) {
+ version = (*next_record & E1000_INVM_VER_FIELD_TWO)
+ >> 13;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /*
+ * Check if we have even version location
+ * used and it is the last one used
+ */
+ else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
+ ((*record & 0x3) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+
+ /* Decode the version found; version == 0 yields major/minor of 0 */
+ if (status == E1000_SUCCESS) {
+ invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
+ >> E1000_INVM_MAJOR_SHIFT;
+ invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
+ }
+ /* Read Image Type (same backwards scan as the version pass) */
+ for (i = 1; i < invm_blocks; i++) {
+ record = &buffer[invm_blocks - i];
+ next_record = &buffer[invm_blocks - i + 1];
+
+ /* Check if we have image type in first location used */
+ if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
+ invm_ver->invm_img_type = 0;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have image type in the last location used */
+ else if ((((*record & 0x3) == 0) &&
+ ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
+ ((((*record & 0x3) != 0) && (i != 1)))) {
+ invm_ver->invm_img_type =
+ (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+ return status;
+}
+
+/**
* e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
return ret_val;
}
+#if defined(QV_RELEASE) && defined(SPRINGVILLE_FLASHLESS_HW)
+/**
+ * e1000_get_flash_presence_i210 - Check if flash device is detected.
+ * @hw: pointer to the HW structure
+ *
+ * Returns true when the EECD register reports an attached flash part
+ * (E1000_EECD_FLASH_DETECTED_I210 set), false otherwise.
+ **/
+static bool e1000_get_flash_presence_i210(struct e1000_hw *hw)
+{
+ u32 eec = 0;
+ bool ret_val = false;
+
+ DEBUGFUNC("e1000_get_flash_presence_i210");
+
+ eec = E1000_READ_REG(hw, E1000_EECD);
+
+ if (eec & E1000_EECD_FLASH_DETECTED_I210)
+ ret_val = true;
+
+ return ret_val;
+}
+
+#endif /* QV_RELEASE && SPRINGVILLE_FLASHLESS_HW */
/**
* e1000_update_flash_i210 - Commit EEPROM to the flash
* @hw: pointer to the HW structure
switch (hw->mac.type) {
case e1000_i210:
+#if defined(QV_RELEASE) && defined(SPRINGVILLE_FLASHLESS_HW)
+ if (e1000_get_flash_presence_i210(hw))
+ hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
+ else
+ hw->nvm.ops.init_params = e1000_init_nvm_params_i211;
+#else
hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
+#endif /* QV_RELEASE && SPRINGVILLE_FLASHLESS_HW */
break;
case e1000_i211:
hw->nvm.ops.init_params = e1000_init_nvm_params_i211;
s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
s32 e1000_read_invm_i211(struct e1000_hw *hw, u8 address, u16 *data);
+s32 e1000_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver);
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
+#define E1000_INVM_ULT_BYTES_SIZE 8
+#define E1000_INVM_RECORD_SIZE_IN_BYTES 4
+#define E1000_INVM_VER_FIELD_ONE 0x1FF8
+#define E1000_INVM_VER_FIELD_TWO 0x7FE000
+#define E1000_INVM_IMGTYPE_FIELD 0x1F800000
+
+#define E1000_INVM_MAJOR_MASK 0x3F0
+#define E1000_INVM_MINOR_MASK 0xF
+#define E1000_INVM_MAJOR_SHIFT 4
/* Default LED register configuration for i210 parts.
 * NOTE: the original expression OR-ed (ID_LED_DEF1_DEF2 << 4) twice — a
 * copy-paste duplication; since x | x == x, dropping the duplicate term
 * leaves the value unchanged.
 */
#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
			     (ID_LED_DEF1_DEF2 << 4) | \
			     (ID_LED_DEF1_DEF2))
+/* NVM offset defaults for Pearsonville (i211) device */
+#define NVM_INIT_CTRL_2_DEFAULT_I211	0x7243
+#define NVM_INIT_CTRL_4_DEFAULT_I211	0x00C1
+#define NVM_LED_1_CFG_DEFAULT_I211	0x0184
+#define NVM_LED_0_2_CFG_DEFAULT_I211	0x200C
#endif
static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
static void e1000_config_collision_dist_generic(struct e1000_hw *hw);
+static void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
/**
* e1000_init_mac_ops_generic - Initialize MAC function pointers
* Sets the receive address array register at index to the address passed
* in by addr.
**/
-void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+static void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
return ret_val;
}
- if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
+ if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
hw->fc.requested_mode = e1000_fc_none;
else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
NVM_WORD0F_ASM_DIR)
* In the case of the phy reset being blocked, we already have a link.
* We do not need to set it up again.
*/
- if (hw->phy.ops.check_reset_block(hw))
+ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
return E1000_SUCCESS;
/*
{
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val = E1000_SUCCESS;
+ u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
u16 speed, duplex;
}
}
+ /*
+ * Check for the case where we have SerDes media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->phy.media_type == e1000_media_type_internal_serdes)
+ && mac->autoneg) {
+ /*
+ * Read the PCS_LSTS and check to see if AutoNeg
+ * has completed.
+ */
+ pcs_status_reg = E1000_READ_REG(hw, E1000_PCS_LSTAT);
+
+ if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
+ DEBUGOUT("PCS Auto Neg has not completed.\n");
+ return ret_val;
+ }
+
+ /*
+ * The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (PCS_ANADV) and the Auto_Negotiation Base
+ * Page Ability Register (PCS_LPAB) to determine how
+ * flow control was negotiated.
+ */
+ pcs_adv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
+ pcs_lp_ability_reg = E1000_READ_REG(hw, E1000_PCS_LPAB);
+
+ /*
+ * Two bits in the Auto Negotiation Advertisement Register
+ * (PCS_ANADV) and two bits in the Auto Negotiation Base
+ * Page Ability Register (PCS_LPAB) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | e1000_fc_full
+ *
+ */
+ if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
+			/*
+			 * Check whether the user selected Rx-only PAUSE
+			 * frames.  In that case we had to advertise FULL
+			 * flow control, because Rx-only cannot be
+			 * advertised; so now decide whether TRANSMISSION
+			 * of PAUSE frames must be turned OFF.
+			 */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /*
+ * For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ */
+ else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /*
+ * For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ */
+ else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+ /*
+ * Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+ DEBUGOUT("Flow Control = NONE.\n");
+ }
+
+ /*
+ * Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ pcs_ctrl_reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+ pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+ E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_ctrl_reg);
+
+ ret_val = e1000_force_mac_fc_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
return E1000_SUCCESS;
}
hw->phy.mdix = 1;
return -E1000_ERR_CONFIG;
}
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings
+ * @hw: pointer to the HW structure
+ *
+ * Validate the MDI/MDIx setting, allowing for auto-crossover during forced
+ * operation.  Unconditionally returns E1000_SUCCESS: with auto-crossover
+ * available, any MDI/MDIx configuration is acceptable in forced mode.
+ **/
+s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic");
	return E1000_SUCCESS;
}
s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);
s32 e1000_setup_led_generic(struct e1000_hw *hw);
s32 e1000_setup_link_generic(struct e1000_hw *hw);
+s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
u32 offset, u8 data);
void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
-void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
void e1000_reset_adaptive_generic(struct e1000_hw *hw);
void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
/* Check that the host interface is enabled. */
hicr = E1000_READ_REG(hw, E1000_HICR);
- if ((hicr & E1000_HICR_EN) == 0) {
+ if (!(hicr & E1000_HICR_EN)) {
DEBUGOUT("E1000_HOST_EN bit disabled.\n");
return -E1000_ERR_HOST_INTERFACE_COMMAND;
}
/* Check that the host interface is enabled. */
hicr = E1000_READ_REG(hw, E1000_HICR);
- if ((hicr & E1000_HICR_EN) == 0) {
+ if (!(hicr & E1000_HICR_EN)) {
DEBUGOUT("E1000_HOST_EN bit disabled.\n");
return -E1000_ERR_HOST_INTERFACE_COMMAND;
}
/* Check that the host interface is enabled. */
hicr = E1000_READ_REG(hw, E1000_HICR);
- if ((hicr & E1000_HICR_EN) == 0) {
+ if (!(hicr & E1000_HICR_EN)) {
DEBUGOUT("E1000_HOST_EN bit disabled.\n");
return -E1000_ERR_CONFIG;
}
- if ((hicr & E1000_HICR_MEMORY_BASE_EN) == 0) {
+ if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) {
DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n");
return -E1000_ERR_CONFIG;
}
#include "e1000_api.h"
-static void e1000_stop_nvm(struct e1000_hw *hw);
static void e1000_reload_nvm_generic(struct e1000_hw *hw);
/**
nvm->ops.valid_led_default = e1000_null_led_default;
nvm->ops.validate = e1000_null_ops_generic;
nvm->ops.write = e1000_null_write_nvm;
- nvm->ops.get_protected_block_size =
- e1000_get_protected_block_size_generic;
- nvm->ops.read_protected_blocks = e1000_read_protected_blocks_generic;
- nvm->ops.write_protected_blocks = e1000_write_protected_blocks_generic;
}
/**
if (nvm_data != NVM_PBA_PTR_GUARD) {
DEBUGOUT("NVM PBA number is not stored as string\n");
- /* we will need 11 characters to store the PBA */
- if (pba_num_size < 11) {
+	/* make sure caller's buffer is big enough to store the PBA */
+ if (pba_num_size < E1000_PBANUM_LENGTH) {
DEBUGOUT("PBA string buffer too small\n");
return E1000_ERR_NO_SPACE;
}
/* if data is not ptr guard the PBA must be in legacy format */
if (nvm_data != NVM_PBA_PTR_GUARD) {
- *pba_num_size = 11;
+ *pba_num_size = E1000_PBANUM_LENGTH;
return E1000_SUCCESS;
}
}
/**
- * e1000_get_protected_block_size_generic - Get the size of EEPROM block
- * @hw: pointer to hardware structure
- * @block: pointer to the protected block structure describing our block
- * @eeprom_buffer: pointer to eeprom image buffer.
- * @eeprom_buffer_size: size of eeprom_buffer
+ * e1000_get_fw_version - Get firmware version information
+ * @hw: pointer to the HW structure
+ * @fw_vers: pointer to output version structure
*
- * This function reads the size of protected EEPROM block from the EEPROM
- * content (if eeprom_buffer = NULL) or from eeprom_buffer.
+ * unsupported/not present features return 0 in version structure
**/
-s32 e1000_get_protected_block_size_generic(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *block,
- u16 *eeprom_buffer, u32 eeprom_buffer_size)
+void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
{
- s32 status;
- u16 pointer, size_word;
-
- DEBUGFUNC("e1000_get_protected_block_size_generic");
-
- if ( (!block) || (0 == block->pointer) ) {
- status = -E1000_ERR_INVALID_ARGUMENT;
- goto out;
- }
-
- if (block->block_size) {
- status = E1000_SUCCESS;
- goto out;
- }
-
- if (block->pointer) {
- if (eeprom_buffer) {
- if (block->word_address > eeprom_buffer_size) {
- status = -E1000_ERR_INVALID_ARGUMENT;
- goto out;
- }
- pointer = eeprom_buffer[block->word_address];
- status = E1000_SUCCESS;
- } else
- status = e1000_read_nvm(hw, block->word_address, 1,
- &pointer);
- if (status != E1000_SUCCESS)
- goto out;
-
- if (pointer == 0xFFFF) {
- block->block_size = 0;
- goto out;
- }
- }
-
- switch (block->block_type) {
- case e1000_block_iscsi_boot_config:
- /* size of the 'iSCSI Module Structure' is in 'Block Size'
- * at word offset [0x01] */
- pointer += E1000_ISCSI_BLOCK_SIZE_WORD_OFFSET;
- if (eeprom_buffer) {
- if ((u32)pointer + 1 > eeprom_buffer_size) {
- status = -E1000_ERR_INVALID_ARGUMENT;
- goto out;
- }
- size_word = eeprom_buffer[pointer];
- status = E1000_SUCCESS;
- } else {
- status = e1000_read_nvm(hw, pointer, 1, &size_word);
- if (status != E1000_SUCCESS)
- goto out;
- }
-
- /* Block size is in bytes, so we need to conver it to words */
- block->block_size = size_word / 2;
+ u16 eeprom_verh, eeprom_verl, fw_version;
+ u16 comb_verh, comb_verl, comb_offset;
+
+ memset(fw_vers, 0, sizeof(struct e1000_fw_version));
+
+ /* this code only applies to certain mac types */
+ switch (hw->mac.type) {
+ case e1000_i211:
+ e1000_read_invm_version(hw, fw_vers);
+ return;
+ case e1000_82575:
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
break;
default:
- status = -E1000_ERR_INVALID_ARGUMENT;
- break;
- }
-out:
- return status;
-}
-
-/**
- * e1000_read_protected_block_generic - Read EEPROM protected block
- * @hw: pointer to hardware structure
- * @block: pointer to the protected block to read
- * @eeprom_buffer: pointer to eeprom image buffer.
- * @eeprom_buffer_size: size of eeprom_buffer
- *
- * This function reads the content of EEPROM protected block from buffer (if
- * provided) or EEPROM.
- **/
-s32 e1000_read_protected_block_generic(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *block,
- u16 *eeprom_buffer, u32 eeprom_buffer_size)
-{
- s32 status;
- u32 max_address;
- u16 pointer;
-
- DEBUGFUNC("e1000_read_eeprom_protected_block_generic");
-
- if (!block || !block->buffer) {
- status = -E1000_ERR_INVALID_ARGUMENT;
- goto out;
- }
-
- /* Read raw word */
- if (!block->pointer) {
- max_address = block->block_size + block->word_address;
- if (eeprom_buffer) {
- if (max_address > eeprom_buffer_size) {
- status = -E1000_ERR_INVALID_ARGUMENT;
- goto out;
- }
- memcpy(block->buffer,
- eeprom_buffer + block->word_address,
- 2 * (max_address - block->word_address));
- status = E1000_SUCCESS;
- } else
- status = e1000_read_nvm(hw, block->word_address,
- block->block_size, block->buffer);
- }
- /* Read pointer */
- else {
- if (eeprom_buffer) {
- if (block->word_address > eeprom_buffer_size) {
- status = -E1000_ERR_INVALID_ARGUMENT;
- goto out;
- }
- pointer = eeprom_buffer[block->word_address];
- status = E1000_SUCCESS;
- } else
- status = e1000_read_nvm(hw, block->word_address, 1,
- &pointer);
- if (status != E1000_SUCCESS)
- goto out;
-
- pointer += block->pointed_word_offset;
- max_address = block->block_size + pointer;
- if (eeprom_buffer) {
- if (max_address > eeprom_buffer_size) {
- status = -E1000_ERR_INVALID_ARGUMENT;
- goto out;
- }
- memcpy(block->buffer, eeprom_buffer + pointer,
- 2 * (max_address - pointer));
- status = E1000_SUCCESS;
- } else
- status = e1000_read_nvm(hw, pointer, block->block_size,
- block->buffer);
- }
-out:
- return status;
-}
-
-/**
- * e1000_read_protected_blocks_generic - Read EEPROM protected blocks
- * @hw: pointer to hardware structure
- * @blocks: pointer to the protected blocks to read
- * @blocks_number: number of blocks to read
- * @eeprom_buffer: pointer to eeprom image buffer.
- * @eeprom_buffer_size: size of eeprom_buffer
- *
- * This function reads the content of EEPROM protected blocks from buffer (if
- * provided) or EEPROM.
- **/
-s32 e1000_read_protected_blocks_generic(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *blocks,
- u16 blocks_number, u16 *eeprom_buffer,
- u32 eeprom_buffer_size)
-{
- s32 status = E1000_SUCCESS;
- u16 i;
-
- DEBUGFUNC("e1000_read_protected_blocks_generic");
-
- if (!blocks) {
- status = -E1000_ERR_INVALID_ARGUMENT;
- goto out;
- }
-
- /* Check if all buffers are allocated */
- for (i = 0; i < blocks_number; i++) {
- if (!blocks[i].buffer) {
- status = -E1000_ERR_INVALID_ARGUMENT;
- goto out;
- }
- }
-
- /* Read all protected blocks */
- for (i = 0; i < blocks_number; i++) {
- status = e1000_read_protected_block_generic(hw,
- blocks + i, eeprom_buffer, eeprom_buffer_size);
- if (status != E1000_SUCCESS)
- goto out;
- }
-out:
- return status;
+ return;
}
-/**
- * e1000_write_eeprom_protected_block_generic - Write EEPROM protected block
- * @hw: pointer to hardware structure
- * @block: pointer to the protected block to write
- * @eeprom_buffer: pointer to eeprom image buffer.
- * @eeprom_buffer_size: size of eeprom_buffer
- *
- * This function writes the content of EEPROM protected block to buffer (if
- * provided) or EEPROM.
- **/
-s32 e1000_write_protected_block_generic(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *block,
- u16 *eeprom_buffer, u32 eeprom_buffer_size)
-{
- s32 status = E1000_SUCCESS;
- u32 start_address, end_address, address;
- u16 pointer, word;
-
- DEBUGFUNC("e1000_write_eeprom_protected_block_generic");
-
- if (!block || !block->buffer) {
- status = -E1000_ERR_INVALID_ARGUMENT;
- goto out;
- }
-
- /* Write raw word */
- if (!block->pointer) {
- start_address = block->word_address;
- end_address = start_address + block->block_size;
- if (eeprom_buffer) {
- if (end_address > eeprom_buffer_size) {
- status = -E1000_ERR_INVALID_ARGUMENT;
- goto out;
- }
- status = E1000_SUCCESS;
- }
- }
- /* Write pointer */
- else {
- if (eeprom_buffer) {
- if (block->word_address > eeprom_buffer_size) {
- status = -E1000_ERR_INVALID_ARGUMENT;
- goto out;
+ /* basic eeprom version numbers */
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
+ fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK);
+
+ /* etrack id */
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl;
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i350:
+ /* find combo image version */
+ hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
+ if ((comb_offset != 0x0) &&
+ (comb_offset != NVM_VER_INVALID)) {
+
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ + 1), 1, &comb_verh);
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
+ 1, &comb_verl);
+
+ /* get Option Rom version if it exists and is valid */
+ if ((comb_verh && comb_verl) &&
+ ((comb_verh != NVM_VER_INVALID) &&
+ (comb_verl != NVM_VER_INVALID))) {
+
+ fw_vers->or_valid = true;
+ fw_vers->or_major =
+ comb_verl >> NVM_COMB_VER_SHFT;
+ fw_vers->or_build =
+ (comb_verl << NVM_COMB_VER_SHFT)
+ | (comb_verh >> NVM_COMB_VER_SHFT);
+ fw_vers->or_patch =
+ comb_verh & NVM_COMB_VER_MASK;
}
- pointer = eeprom_buffer[block->word_address];
- status = E1000_SUCCESS;
- } else
- status = e1000_read_nvm(hw, block->word_address, 1,
- &pointer);
- if (status != E1000_SUCCESS)
- goto out;
- /* Check if current pointer isn't 0xFFFF (not allocated) */
- if (pointer == 0xFFFF) {
- status = -E1000_ERR_NVM;
- DEBUGOUT1("Error. Cannot merge record %d",
- block->word_address);
- goto out;
- }
- start_address = pointer + block->pointed_word_offset;
- end_address = start_address + block->block_size;
- if (eeprom_buffer) {
- if (end_address > eeprom_buffer_size) {
- status = -E1000_ERR_INVALID_ARGUMENT;
- goto out;
- }
- }
- }
-
- /* Finally write the changes to the EEPROM */
- for (address = start_address; address < end_address; address++) {
- status = e1000_read_nvm(hw, address, 1, &word);
- if (status != E1000_SUCCESS)
- break;
- /* apply the mask */
- word &= ~block->word_mask;
- word |= (block->buffer[address - start_address] &
- block->word_mask);
- if (eeprom_buffer)
- eeprom_buffer[address] = word;
- else
- status = e1000_write_nvm(hw, address, 1, &word);
- if (status != E1000_SUCCESS)
- break;
- }
-out:
- return status;
-}
-
-/**
- * e1000_write_protected_blocks_generic - Read EEPROM protected blocks
- * @hw: pointer to hardware structure
- * @blocks: pointer to the protected blocks to write
- * @blocks_number: number of blocks to read
- * @eeprom_buffer: pointer to eeprom image buffer.
- * @eeprom_buffer_size: size of eeprom_buffer
- *
- * This function writes the content of EEPROM protected blocks from buffer (if
- * provided) or EEPROM.
- **/
-s32 e1000_write_protected_blocks_generic(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *blocks,
- u16 blocks_number, u16 *eeprom_buffer,
- u32 eeprom_buffer_size)
-{
- s32 status = E1000_SUCCESS;
- u16 i;
-
- DEBUGFUNC("ixgbe_write_protected_blocks_generic");
-
- if (!blocks) {
- status = -E1000_ERR_INVALID_ARGUMENT;
- goto out;
- }
- /* Check if all buffers are allocated */
- for (i = 0; i < blocks_number; i++) {
- if (!blocks[i].buffer) {
- status = -E1000_ERR_INVALID_ARGUMENT;
- goto out;
}
- }
+ break;
- /* Write all protected blocks */
- for (i = 0; i < blocks_number; i++) {
- status = e1000_write_protected_block_generic(hw, blocks + i,
- eeprom_buffer, eeprom_buffer_size);
- if (status != E1000_SUCCESS)
+ default:
break;
}
-out:
- return status;
+ return;
}
-/**
- * e1000_get_protected_blocks_from_table - Get the masked list of protected
- * EEPROM words from device specific table
- * @hw: pointer to hardware structure
- * @protected_blocks_table: pointer to device-specific list of protected blocks
- * @protected_blocks_table_size: size of protected_blocks_table list
- * @blocks: buffer to contain the list of protected blocks
- * @blocks_size: size of the blocks buffer
- * @block_type_mask: any combination of e1000_nvm_block_type types.
- *
- * This function gets masked list of protected EEPROM blocks from device
- * specific list. If blocks is set to NULL, the function return the size
- * of blocks buffer required to hold masked list
- **/
-s32 e1000_get_protected_blocks_from_table(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *protected_blocks_table,
- u16 protected_blocks_table_size,
- struct e1000_nvm_protected_block *blocks, u16 *blocks_size,
- u32 block_type_mask, u16 *eeprom_buffer, u32 eeprom_size)
-{
- struct e1000_nvm_protected_block *current_block;
- s32 status = E1000_SUCCESS;
- u16 i, pointer_value, masked_blocks_count;
-
- DEBUGFUNC("e1000_get_protected_blocks_from_table");
-
- masked_blocks_count = 0;
-
- /* get the number of blocks to copy */
- for (i = 0; i < protected_blocks_table_size; i++) {
- current_block = &protected_blocks_table[i];
- if ((current_block->block_type & block_type_mask) == 0)
- continue;
-
- /* If it's a pointer read its value */
- if (current_block->pointer) {
- status = e1000_read_nvm(hw,
- current_block->word_address, 1,
- &pointer_value);
- if (status != E1000_SUCCESS)
- goto out;
- /* Skip empty pointers */
- if (pointer_value == 0xFFFF)
- continue;
- }
-
- /* Copy blocks listed in table to the provided buffer */
- if (blocks) {
- if (masked_blocks_count >= *blocks_size) {
- status = -E1000_ERR_NO_SPACE;
- goto out;
- }
- status = e1000_get_protected_block_size(hw,
- current_block,
- eeprom_buffer,
- eeprom_size);
- memcpy(&blocks[masked_blocks_count], current_block,
- sizeof(struct e1000_nvm_protected_block));
- if (status != E1000_SUCCESS)
- goto out;
- }
- masked_blocks_count++;
- }
-
- if (!blocks) {
- *blocks_size = masked_blocks_count;
- status = E1000_SUCCESS;
- }
-out:
- return status;
-}
#ifndef _E1000_NVM_H_
#define _E1000_NVM_H_
+/* Firmware/NVM version information filled in by e1000_get_fw_version() */
+struct e1000_fw_version {
+	u32 etrack_id;		/* eTrack id, read from the NVM_ETRACK_WORD pair */
+	u16 eep_major;		/* EEPROM image major version (from NVM_VERSION) */
+	u16 eep_minor;		/* EEPROM image minor version (from NVM_VERSION) */
+
+	u8 invm_major;		/* iNVM image major version (iNVM parts, e.g. i211) */
+	u8 invm_minor;		/* iNVM image minor version */
+	u8 invm_img_type;	/* iNVM image-type field; 0 when not present */
+
+	bool or_valid;		/* true when the Option ROM fields below are valid */
+	u16 or_major;		/* Option ROM major version */
+	u16 or_build;		/* Option ROM build number */
+	u16 or_patch;		/* Option ROM patch level */
+};
void e1000_init_nvm_ops_generic(struct e1000_hw *hw);
s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
void e1000_null_nvm_generic(struct e1000_hw *hw);
u16 *data);
s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
void e1000_release_nvm_generic(struct e1000_hw *hw);
+void e1000_get_fw_version(struct e1000_hw *hw,
+ struct e1000_fw_version *fw_vers);
#define E1000_STM_OPCODE 0xDB00
-s32 e1000_get_protected_block_size_generic(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *block,
- u16 *eeprom_buffer, u32 eeprom_buffer_size);
-s32 e1000_read_protected_block_generic(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *block,
- u16 *eeprom_buffer, u32 eeprom_buffer_size);
-s32 e1000_read_protected_blocks_generic(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *blocks,
- u16 blocks_number, u16 *eeprom_buffer,
- u32 eeprom_buffer_size);
-s32 e1000_write_protected_block_generic(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *block,
- u16 *eeprom_buffer, u32 eeprom_buffer_size);
-s32 e1000_write_protected_blocks_generic(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *blocks,
- u16 blocks_number, u16 *eeprom_buffer,
- u32 eeprom_buffer_size);
-s32 e1000_get_protected_blocks_from_table(struct e1000_hw *hw,
- struct e1000_nvm_protected_block *protected_blocks_table,
- u16 protected_blocks_table_size,
- struct e1000_nvm_protected_block *blocks, u16 *blocks_size,
- u32 block_type_mask, u16 *eeprom_buffer, u32 eeprom_size);
#endif
#endif
+/*
+ * Debug output helpers: DEBUGOUT/DEBUGOUT1 expand to printk(KERN_DEBUG ...)
+ * only when DEBUG is defined, and DEBUGFUNC traces function entry only when
+ * DEBUG_FUNC is defined; all of them compile to nothing otherwise.
+ */
+#ifdef DEBUG
+#define DEBUGOUT(S) printk(KERN_DEBUG S)
+#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S, ## A)
+#else
#define DEBUGOUT(S)
#define DEBUGOUT1(S, A...)
+#endif
+#ifdef DEBUG_FUNC
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+#else
#define DEBUGFUNC(F)
+#endif
#define DEBUGOUT2 DEBUGOUT1
#define DEBUGOUT3 DEBUGOUT2
#define DEBUGOUT7 DEBUGOUT3
if (ret_val)
return ret_val;
+ /* Set MDI/MDIX mode */
+ ret_val = hw->phy.ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
+ /*
+ * Options:
+ * 0 - Auto (default)
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ */
+ switch (hw->phy.mdix) {
+ case 1:
+ break;
+ case 2:
+ phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
+ break;
+ }
+ ret_val = hw->phy.ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data);
+ if (ret_val)
+ return ret_val;
return e1000_set_master_slave_mode(hw);
}
* 1 - Enabled
*/
phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
- if (phy->disable_polarity_correction == 1)
+ if (phy->disable_polarity_correction)
phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
* 1 - Enabled
*/
phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
- if (phy->disable_polarity_correction == 1)
+ if (phy->disable_polarity_correction)
phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
/* Enable downshift and setting it to X6 */
* If autoneg_advertised is zero, we assume it was not defaulted
* by the calling code so we set to advertise full capability.
*/
- if (phy->autoneg_advertised == 0)
+ if (!phy->autoneg_advertised)
phy->autoneg_advertised = phy->autoneg_mask;
DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
DEBUGFUNC("e1000_phy_force_speed_duplex_m88");
+ /* I210 and I211 devices support Auto-Crossover in forced operation. */
+ if (phy->type != e1000_phy_i210) {
/*
- * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
- * forced whenever speed and duplex are forced.
+ * Clear Auto-Crossover to force MDI manually. M88E1000
+ * requires MDI forced whenever speed and duplex are forced.
*/
- ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ &phy_data);
if (ret_val)
return ret_val;
phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
- ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ phy_data);
if (ret_val)
return ret_val;
+ }
DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
DEBUGFUNC("e1000_phy_hw_reset_generic");
+ if (phy->ops.check_reset_block) {
ret_val = phy->ops.check_reset_block(hw);
if (ret_val)
return E1000_SUCCESS;
+ }
ret_val = phy->ops.acquire(hw);
if (ret_val)
enum e1000_phy_type phy_type = e1000_phy_unknown;
switch (phy_id) {
- case M88E1145_E_PHY_ID:
case M88E1000_I_PHY_ID:
case M88E1000_E_PHY_ID:
case M88E1111_I_PHY_ID:
#define I82577_PHY_STATUS2_SPEED_100MBPS 0x0100
/* I82577 PHY Control 2 */
-#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400
-#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200
+#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
/* I82577 PHY Diagnostics Status */
#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */
#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */
#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */
+#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */
#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
#define IGP01E1000_PHY_POLARITY_MASK 0x0078
#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
#define E1000_FEXT 0x0002C /* Future Extended - RW */
-#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
+#define E1000_FEXTNVM2 0x00030 /* Future Extended NVM 2 - RW */
+#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
+#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
#define E1000_TQAVHC(_n) (0x0300C + ((_n) * 0x40))
#define E1000_TQAVCC(_n) (0x03004 + ((_n) * 0x40))
+/* QAV Tx mode control register */
+#define E1000_I210_TQAVCTRL 0x3570
+
+/* High credit registers where _n can be 0 or 1. */
+#define E1000_I210_TQAVHC(_n) (0x300C + 0x40 * (_n))
+
+/* Queues fetch arbitration priority control register */
+#define E1000_I210_TQAVARBCTRL 0x3574
+/* Queues priority masks where _n and _p can be 0-3. */
+#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * _n))
+/* QAV Tx mode control registers where _n can be 0 or 1. */
+#define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n))
+
+/* Good transmitted packets counter registers */
+#define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n)))
+
+/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */
+#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * _n))
+
/*
* Convenience macros
*
#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */
+#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */
#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
/* PCIe Parity Status Register */
#define E1000_PCIEERRSTS 0x05BA8
-#define E1000_LTRMINV 0x5BB0 /* LTR Minimum Value */
-#define E1000_LTRMAXV 0x5BB4 /* LTR Maximum Value */
-#define E1000_DOBFFCTL 0x3F24 /* DMA OBFF Control Register */
-
#define E1000_PROXYS 0x5F64 /* Proxying Status */
#define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */
/* Thermal sensor configuration and status registers */
#include <linux/ethtool.h>
#endif
-#include <linux/clocksource.h>
-#include <linux/net_tstamp.h>
-#ifdef CONFIG_PTP
-#include <linux/ptp_clock_kernel.h>
-#endif
-#include <linux/bitops.h>
-#include <linux/if_vlan.h>
struct igb_adapter;
printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
__FUNCTION__ , ## args))
+#ifdef CONFIG_IGB_PTP
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#endif /* CONFIG_IGB_PTP */
/* Interrupt defines */
#define IGB_START_ITR 648 /* ~6000 ints/sec */
#define IGB_4K_ITR 980
/* TX/RX descriptor defines */
#define IGB_DEFAULT_TXD 256
-#define IGB_DEFAULT_TX_WORK 128
#define IGB_MIN_TXD 80
#define IGB_MAX_TXD 4096
+#define IGB_DEFAULT_TX_WORK 128
#define IGB_DEFAULT_RXD 256
#define IGB_MIN_RXD 80
#define OUI_LEN 3
#define IGB_MAX_VMDQ_QUEUES 8
-#define E1000_DMACDC 0x3F1C
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
*/
/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_512 512
-#define IGB_RXBUFFER_2048 2048
-#define IGB_RXBUFFER_256 256
#define IGB_RXBUFFER_16384 16384
#define IGB_RX_HDR_LEN IGB_RXBUFFER_512
#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define IGB_EEPROM_APME 0x0400
-#ifndef ETH_TP_MDI_X
#define AUTO_ALL_MODES 0
-#endif
#ifndef IGB_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
u64 drops;
u64 csum_err;
u64 alloc_failed;
- u64 csum_good;
- u64 rx_hdr_split;
- u64 lli_int;
- u64 pif_count;
};
struct igb_ring_container {
struct igb_ring_container rx, tx;
struct napi_struct napi;
- int numa_node;
u16 itr_val;
u8 set_itr;
#endif
/* Items past this point are only used during ring alloc / free */
dma_addr_t dma; /* phys address of the ring */
- int numa_node; /* node to alloc ring memory on */
} ____cacheline_internodealigned_in_smp;
bool fc_autoneg;
u8 tx_timeout_factor;
+#ifdef DEBUG
+ bool tx_hang_detected;
+ bool disable_hw_reset;
+#endif
u32 max_frame_size;
/* OS defined structs */
u32 eims_other;
/* to not mess up cache alignment, always add to the bottom */
- u32 eeprom_wol;
+ bool wol_supported;
u32 *config_space;
u16 tx_ring_count;
int int_mode;
u32 rss_queues;
u32 vmdq_pools;
- u16 fw_version;
- int node;
+ char fw_version[32];
u32 wvbr;
struct igb_mac_addr *mac_table;
#ifdef CONFIG_IGB_VMDQ_NETDEV
#endif
int vferr_refcount;
int dmac;
- u64 dmac_entries;
- int count;
u32 *shadow_vfta;
-#ifdef CONFIG_PTP
- struct ptp_clock *ptp_clock;
- struct ptp_clock_info ptp_caps;
- struct delayed_work ptp_overflow_work;
- struct work_struct ptp_tx_work;
- struct sk_buff *ptp_tx_skb;
- spinlock_t tmreg_lock;
- struct cyclecounter cc;
- struct timecounter tc;
-#endif /* CONFIG_PTP */
-
/* External Thermal Sensor support flag */
bool ets;
#ifdef IGB_SYSFS
struct igb_therm_proc_data therm_data[E1000_MAX_SENSORS];
#endif /* IGB_PROCFS */
#endif /* IGB_SYSFS */
+ u32 etrack_id;
+
+#ifdef CONFIG_IGB_PTP
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct delayed_work ptp_overflow_work;
+ struct work_struct ptp_tx_work;
+ struct sk_buff *ptp_tx_skb;
+ spinlock_t tmreg_lock;
+ struct cyclecounter cc;
+ struct timecounter tc;
+#endif /* CONFIG_IGB_PTP */
};
#ifdef CONFIG_IGB_VMDQ_NETDEV
#define IGB_FLAG_EEE (1 << 6)
#define IGB_FLAG_DMAC (1 << 7)
#define IGB_FLAG_DETECT_BAD_DMA (1 << 8)
+#define IGB_FLAG_PTP (1 << 9)
#define IGB_MIN_TXPBSIZE 20408
#define IGB_TX_BUF_4096 4096
} cmd_or_resp;
u8 checksum;
};
+#pragma pack(push,1)
struct e1000_fw_drv_info {
struct e1000_fw_hdr hdr;
u8 port_num;
u16 pad; /* end spacing to ensure length is mult. of dword */
u8 pad2; /* end spacing to ensure length is mult. of dword2 */
};
+#pragma pack(pop)
enum e1000_state_t {
__IGB_TESTING,
__IGB_RESETTING,
#ifdef CONFIG_PTP
extern void igb_ptp_init(struct igb_adapter *adapter);
extern void igb_ptp_stop(struct igb_adapter *adapter);
+extern void igb_ptp_reset(struct igb_adapter *adapter);
extern void igb_ptp_tx_work(struct work_struct *work);
extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
extern void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
extern void igb_vlan_mode(struct net_device *, u32);
#endif
-/* TLP Processing Hints (TPH) definitions */
-#define E1000_DCA_CTRL_TPH_READ_DISABLE (1 << 8) /* TPH Read Hint disable */
-#define E1000_DCA_CTRL_TPH_DATA_PH 0x00001000 /* TPH Data hint mode */
-
-#define E1000_TPH_RXCTRL_CPUID_MASK 0xFF000000 /* Rx CPUID Mask */
-#define E1000_TPH_RXCTRL_FTCH_DCA_EN (1 << 0) /* TPH Rx Desc fetch enable */
-#define E1000_TPH_RXCTRL_DESC_DCA_EN (1 << 1) /* TPH Rx Desc writeback enable */
-#define E1000_TPH_RXCTRL_HEAD_DCA_EN (1 << 2) /* TPH Rx Data header enable */
-#define E1000_TPH_RXCTRL_DATA_DCA_EN (1 << 3) /* TPH Rx Data payload enable */
-#define E1000_TPH_RXCTRL_AUTOLEARN_EN (1 << 23) /* TPH Rx Autolearn enable */
-
-#define E1000_TPH_TXCTRL_CPUID_MASK 0xFF000000 /* Tx CPUID Mask */
-#define E1000_TPH_TXCTRL_FTCH_DCA_EN (1 << 0) /* TPH Tx Desc fetch enable */
-#define E1000_TPH_TXCTRL_DESC_DCA_EN (1 << 1) /* TPH Tx Desc writeback enable */
-#define E1000_TPH_TXCTRL_DATA_DCA_EN (1 << 3) /* TPH Tx Data payload enable */
-#define E1000_TPH_TXCTRL_AUTOLEARN_EN (1 << 23) /* TPH Tx Autolearn enable */
-
-
#ifdef IGB_SYSFS
void igb_sysfs_exit(struct igb_adapter *adapter);
int igb_sysfs_init(struct igb_adapter *adapter);
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full|
SUPPORTED_Autoneg |
- SUPPORTED_TP);
- ecmd->advertising = (ADVERTISED_TP |
- ADVERTISED_Pause);
+ SUPPORTED_TP |
+ SUPPORTED_Pause);
+ ecmd->advertising = ADVERTISED_TP;
if (hw->mac.autoneg == 1) {
ecmd->advertising |= ADVERTISED_Autoneg;
/* the e1000 autoneg seems to match ethtool nicely */
ecmd->advertising |= hw->phy.autoneg_advertised;
}
+ if (hw->mac.autoneg != 1)
+ ecmd->advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+
+ if (hw->fc.requested_mode == e1000_fc_full)
+ ecmd->advertising |= ADVERTISED_Pause;
+ else if (hw->fc.requested_mode == e1000_fc_rx_pause)
+ ecmd->advertising |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ else if (hw->fc.requested_mode == e1000_fc_tx_pause)
+ ecmd->advertising |= ADVERTISED_Asym_Pause;
+ else
+ ecmd->advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
ecmd->port = PORT_TP;
ecmd->phy_address = hw->phy.addr;
+ ecmd->transceiver = XCVR_INTERNAL;
} else {
ecmd->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full |
SUPPORTED_FIBRE |
- SUPPORTED_Autoneg);
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause);
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
+ ecmd->advertising = (ADVERTISED_FIBRE |
ADVERTISED_Autoneg |
ADVERTISED_Pause);
+ if (adapter->link_speed == SPEED_100)
+ ecmd->advertising = ADVERTISED_100baseT_Full;
+ else
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
}
- ecmd->transceiver = XCVR_INTERNAL;
status = E1000_READ_REG(hw, E1000_STATUS);
if (status & E1000_STATUS_LU) {
- if ((status & E1000_STATUS_SPEED_1000) ||
- hw->phy.media_type != e1000_media_type_copper)
+ if (status & E1000_STATUS_SPEED_1000)
ecmd->speed = SPEED_1000;
else if (status & E1000_STATUS_SPEED_100)
ecmd->speed = SPEED_100;
ecmd->duplex = -1;
}
- ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ if ((hw->phy.media_type == e1000_media_type_fiber) ||
+ hw->mac.autoneg)
+ ecmd->autoneg = AUTONEG_ENABLE;
+ else
+ ecmd->autoneg = AUTONEG_DISABLE;
#ifdef ETH_TP_MDI_X
/* MDI-X => 2; MDI =>1; Invalid =>0 */
- if ((hw->phy.media_type == e1000_media_type_copper) &&
- netif_carrier_ok(netdev))
+ if (hw->phy.media_type == e1000_media_type_copper)
ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
ETH_TP_MDI;
else
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+#ifdef ETH_TP_MDI_AUTO
+ if (hw->phy.mdix == AUTO_ALL_MODES)
+ ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ else
+ ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
+#endif
#endif /* ETH_TP_MDI_X */
return 0;
}
return -EINVAL;
}
+#ifdef ETH_TP_MDI_AUTO
+ /*
+ * MDI setting is only allowed when autoneg enabled because
+ * some hardware doesn't allow MDI setting when speed or
+ * duplex is forced.
+ */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ if (hw->phy.media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (ecmd->autoneg != AUTONEG_ENABLE)) {
+ dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+ return -EINVAL;
+ }
+ }
+
+#endif /* ETH_TP_MDI_AUTO */
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
usleep_range(1000, 2000);
if (ecmd->autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ hw->phy.autoneg_advertised = ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg;
+ if (adapter->link_speed == SPEED_1000)
+ hw->phy.autoneg_advertised =
+ ADVERTISED_1000baseT_Full;
+ else
+ hw->phy.autoneg_advertised =
+ ADVERTISED_100baseT_Full;
+ } else {
hw->phy.autoneg_advertised = ecmd->advertising |
ADVERTISED_TP |
ADVERTISED_Autoneg;
+ }
ecmd->advertising = hw->phy.autoneg_advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = e1000_fc_default;
hw->fc.current_mode = hw->fc.requested_mode;
- retval = ((hw->phy.media_type == e1000_media_type_copper) ?
- e1000_force_mac_fc(hw) : hw->mac.ops.setup_link(hw));
+ retval = hw->mac.ops.setup_link(hw);
}
clear_bit(__IGB_RESETTING, &adapter->state);
strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
strncpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version) - 1);
- /* EEPROM image version # is reported as firmware version # for
- * 82575 controllers */
- snprintf(drvinfo->fw_version, 32, "%d.%d-%d",
- (adapter->fw_version & 0xF000) >> 12,
- (adapter->fw_version & 0x0FF0) >> 4,
- adapter->fw_version & 0x000F);
+ strncpy(drvinfo->fw_version, adapter->fw_version,
+ sizeof(drvinfo->fw_version) - 1);
strncpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info) -1);
drvinfo->n_stats = IGB_STATS_LEN;
static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
{
- u16 temp;
- u16 checksum = 0;
- u16 i;
*data = 0;
- /* Read and add up the contents of the EEPROM */
- for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
- if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
- *data = 1;
- break;
- }
- checksum += temp;
- }
- /* If Checksum is not Correct return error else test passed */
- if ((checksum != (u16) NVM_SUM) && !(*data))
+ /* Validate NVM checksum */
+ if (e1000_validate_nvm_checksum(&adapter->hw) < 0)
*data = 2;
return *data;
ics_mask = 0x77DCFED5;
break;
case e1000_i350:
+ ics_mask = 0x77DCFED5;
+ break;
case e1000_i210:
case e1000_i211:
- ics_mask = 0x77DCFED5;
+ ics_mask = 0x774CFED5;
break;
default:
ics_mask = 0x7FFFFFFF;
if (hw->phy.type == e1000_phy_m88)
igb_phy_disable_receiver(adapter);
- udelay(500);
+ mdelay(500);
return 0;
}
dev_info(pci_dev_to_dev(adapter->pdev), "offline testing starting\n");
/* power up link for link test */
- if ((adapter->hw.mac.type != e1000_i210) &&
- (adapter->hw.mac.type != e1000_i211))
igb_power_up_link(adapter);
/* Link test performed before hardware reset so autoneg doesn't
igb_reset(adapter);
/* power up link for loopback test */
- if ((adapter->hw.mac.type != e1000_i210) &&
- (adapter->hw.mac.type != e1000_i211))
igb_power_up_link(adapter);
if (igb_loopback_test(adapter, &data[3]))
msleep_interruptible(4 * 1000);
}
-static int igb_wol_exclusion(struct igb_adapter *adapter,
- struct ethtool_wolinfo *wol)
-{
- struct e1000_hw *hw = &adapter->hw;
- int retval = 1; /* fail by default */
-
- switch (hw->device_id) {
- case E1000_DEV_ID_82575GB_QUAD_COPPER:
- /* WoL not supported */
- wol->supported = 0;
- break;
- case E1000_DEV_ID_82575EB_FIBER_SERDES:
- case E1000_DEV_ID_82576_FIBER:
- case E1000_DEV_ID_82576_SERDES:
- /* Wake events not supported on port B */
- if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1) {
- wol->supported = 0;
- break;
- }
- /* return success for non excluded adapter ports */
- retval = 0;
- break;
- case E1000_DEV_ID_82576_QUAD_COPPER:
- case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
- /* quad port adapters only support WoL on port A */
- if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
- wol->supported = 0;
- break;
- }
- /* return success for non excluded adapter ports */
- retval = 0;
- break;
- default:
- /* dual port cards only support WoL on port A from now on
- * unless it was enabled in the eeprom for port B
- * so exclude FUNC_1 ports from having WoL enabled */
- if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
- !adapter->eeprom_wol) {
- wol->supported = 0;
- break;
- }
-
- retval = 0;
- }
-
- return retval;
-}
-
static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igb_adapter *adapter = netdev_priv(netdev);
WAKE_PHY;
wol->wolopts = 0;
- /* this function will set ->supported = 0 and return 1 if wol is not
- * supported by this hardware */
- if (igb_wol_exclusion(adapter, wol) ||
- !device_can_wakeup(&adapter->pdev->dev))
+ if (!adapter->wol_supported || !device_can_wakeup(&adapter->pdev->dev))
return;
/* apply any specific unsupported masks here */
if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
return -EOPNOTSUPP;
- if (igb_wol_exclusion(adapter, wol) ||
- !device_can_wakeup(&adapter->pdev->dev))
+ if (!adapter->wol_supported || !device_can_wakeup(&adapter->pdev->dev))
return wol->wolopts ? -EOPNOTSUPP : 0;
/* these settings will always override what we currently have */
adapter->wol = 0;
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_alloc_failed", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_csum_good", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_hdr_split", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_lli_int", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_pif_count", i);
- p += ETH_GSTRING_LEN;
}
/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
break;
}
}
-#ifdef CONFIG_PTP
+#ifdef HAVE_ETHTOOL_GET_TS_INFO
static int igb_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
struct igb_adapter *adapter = netdev_priv(dev);
switch (adapter->hw.mac.type) {
+#ifdef CONFIG_PTP
case e1000_82576:
case e1000_82580:
case e1000_i350:
info->rx_filters |=
(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
- break;
+ return 0;
+#endif /* CONFIG_PTP */
default:
- return ethtool_op_get_ts_info(dev, info);
- break;
+ return -EOPNOTSUPP;
}
- return 0;
}
-#endif /* CONFIG_IGB_PTP */
+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
#ifdef CONFIG_PM_RUNTIME
static int igb_ethtool_begin(struct net_device *netdev)
return;
}
#endif
-static struct ethtool_ops igb_ethtool_ops = {
+#ifdef ETHTOOL_GEEE
+static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ipcnfg, eeer;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+ edata->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full);
+
+ ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
+ eeer = E1000_READ_REG(hw, E1000_EEER);
+
+ /* EEE status on negotiated link */
+ if (ipcnfg & E1000_IPCNFG_EEE_1G_AN)
+ edata->advertised |= ADVERTISED_1000baseT_Full;
+
+ if (ipcnfg & E1000_IPCNFG_EEE_100M_AN)
+ edata->advertised |= ADVERTISED_100baseT_Full;
+
+ if (eeer & E1000_EEER_EEE_NEG)
+ edata->eee_active = true;
+
+ edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
+
+ if (eeer & E1000_EEER_TX_LPI_EN)
+ edata->tx_lpi_enabled = true;
+
+ /*
+ * report correct negotiated EEE status for devices that
+ * wrongly report EEE at half-duplex
+ */
+ if (adapter->link_duplex == HALF_DUPLEX) {
+ edata->eee_enabled = false;
+ edata->eee_active = false;
+ edata->tx_lpi_enabled = false;
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef ETHTOOL_SEEE
+static int igb_set_eee(struct net_device *netdev,
+ struct ethtool_eee *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (!edata->eee_enabled) {
+ hw->dev_spec._82575.eee_disable = true;
+ e1000_set_eee_i350(hw);
+ } else if (edata->eee_enabled) {
+ hw->dev_spec._82575.eee_disable = false;
+ e1000_set_eee_i350(hw);
+ }
+
+ clear_bit(__IGB_RESETTING, &adapter->state);
+ return 0;
+}
+#endif /* ETHTOOL_SEEE */
+
+static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
.get_drvinfo = igb_get_drvinfo,
#endif
.get_coalesce = igb_get_coalesce,
.set_coalesce = igb_set_coalesce,
-#ifdef CONFIG_PTP
+#ifdef HAVE_ETHTOOL_GET_TS_INFO
.get_ts_info = igb_get_ts_info,
-#endif
+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
#ifdef CONFIG_PM_RUNTIME
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
#endif /* HAVE_NDO_SET_FEATURES */
#ifdef ETHTOOL_GADV_COAL
.get_advcoal = igb_get_adv_coal,
- .set_advcoal = igb_set_dmac_coal
+ .set_advcoal = igb_set_dmac_coal,
#endif /* ETHTOOL_GADV_COAL */
+#ifdef ETHTOOL_GEEE
+ .get_eee = igb_get_eee,
+#endif
+#ifdef ETHTOOL_SEEE
+ .set_eee = igb_set_eee,
+#endif
};
void igb_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
+ /* have to "undeclare" const on this struct to remove warnings */
+ SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igb_ethtool_ops);
}
#endif /* SIOCETHTOOL */
#include "igb.h"
#include "igb_vmdq.h"
+#if defined(DEBUG) || defined (DEBUG_DUMP) || defined (DEBUG_ICR) || defined(DEBUG_ITR)
+#define DRV_DEBUG "_debug"
+#else
#define DRV_DEBUG
+#endif
#define DRV_HW_PERF
#define VERSION_SUFFIX "_AVB"
-#define MAJ 3
-#define MIN 4
-#define BUILD 7
+#define MAJ 4
+#define MIN 0
+#define BUILD 17
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." __stringify(BUILD) VERSION_SUFFIX DRV_DEBUG DRV_HW_PERF
char igb_driver_name[] = "igb_avb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
"Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation.";
+static const char igb_copyright[] =
+ "Copyright (c) 2007-2012 Intel Corporation.";
static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER) },
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
-static void igb_update_tph(struct igb_q_vector *);
-static void igb_setup_tph(struct igb_adapter *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
igb_runtime_idle)
#endif /* CONFIG_PM_RUNTIME */
};
+#else
+static int igb_suspend(struct pci_dev *pdev, pm_message_t state);
+static int igb_resume(struct pci_dev *pdev);
#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
#endif /* CONFIG_PM */
#ifndef USE_REBOOT_NOTIFIER
#ifdef CONFIG_PM
#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
.driver.pm = &igb_pm_ops,
+#else
+ .suspend = igb_suspend,
+ .resume = igb_resume,
#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
#endif /* CONFIG_PM */
#ifndef USE_REBOOT_NOTIFIER
{
struct igb_ring *ring;
int i;
-#ifdef HAVE_DEVICE_NUMA_NODE
- int orig_node = adapter->node;
-#endif /* HAVE_DEVICE_NUMA_NODE */
for (i = 0; i < adapter->num_tx_queues; i++) {
-#ifdef HAVE_DEVICE_NUMA_NODE
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
-#endif /* HAVE_DEVICE_NUMA_NODE */
- ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
- adapter->node);
- if (!ring)
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+ ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
if (!ring)
goto err;
ring->count = adapter->tx_ring_count;
ring->queue_index = i;
ring->dev = pci_dev_to_dev(adapter->pdev);
ring->netdev = adapter->netdev;
- ring->numa_node = adapter->node;
/* For 82575, context index must be unique per ring. */
if (adapter->hw.mac.type == e1000_82575)
set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
adapter->tx_ring[3-i] = ring; /* I210 rebase */
}
-#ifdef HAVE_DEVICE_NUMA_NODE
- /* Restore the adapter's original node */
- adapter->node = orig_node;
-#endif /* HAVE_DEVICE_NUMA_NODE */
for (i = 0; i < adapter->num_rx_queues; i++) {
-#ifdef HAVE_DEVICE_NUMA_NODE
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
-#endif /* HAVE_DEVICE_NUMA_NODE */
- ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
- adapter->node);
- if (!ring)
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+ ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
if (!ring)
goto err;
ring->count = adapter->rx_ring_count;
ring->queue_index = i;
ring->dev = pci_dev_to_dev(adapter->pdev);
ring->netdev = adapter->netdev;
- ring->numa_node = adapter->node;
#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
#endif
if (adapter->hw.mac.type >= e1000_82576)
set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
- /*
- * On i350, i210, and i211, loopback VLAN packets
- * have the tag byte-swapped.
- * */
- if (adapter->hw.mac.type >= e1000_i350)
+ /* On i350, loopback VLAN packets have the tag byte-swapped */
+ if (adapter->hw.mac.type == e1000_i350)
set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
adapter->rx_ring[i] = ring;
}
-#ifdef HAVE_DEVICE_NUMA_NODE
- /* Restore the adapter's original node */
- adapter->node = orig_node;
-#endif /* HAVE_DEVICE_NUMA_NODE */
igb_cache_ring_register(adapter);
return E1000_SUCCESS;
err:
-#ifdef HAVE_DEVICE_NUMA_NODE
- /* Restore the adapter's original node */
- adapter->node = orig_node;
-#endif /* HAVE_DEVICE_NUMA_NODE */
igb_free_queues(adapter);
return -ENOMEM;
int i, err = 0, vector = 0;
err = request_irq(adapter->msix_entries[vector].vector,
- igb_msix_other, 0, netdev->name, adapter);
+ &igb_msix_other, 0, netdev->name, adapter);
if (err)
goto out;
vector++;
vfre &= ~(1 << vf_queue);
E1000_WRITE_REG(hw, E1000_VFRE, vfre);
- /* Disable MDFB related bit */
+	/* Disable MDFB related bit; MDFB bits are write-1-to-clear */
mdfb = E1000_READ_REG(hw, E1000_MDFB);
- mdfb &= ~(1 << vf_queue);
+ mdfb |= (1 << vf_queue);
E1000_WRITE_REG(hw, E1000_MDFB, mdfb);
/* Reset the specific VF */
*
* Enable the HW to detect malicious driver and sends an interrupt to
* the driver.
- *
- * Only available on i350 device
**/
static void igb_enable_mdd(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 reg;
+ /* Only available on i350 device */
if (hw->mac.type != e1000_i350)
return;
struct igb_q_vector *q_vector;
struct e1000_hw *hw = &adapter->hw;
int v_idx;
-#ifdef HAVE_DEVICE_NUMA_NODE
- int orig_node = adapter->node;
-#endif /* HAVE_DEVICE_NUMA_NODE */
for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
-#ifdef HAVE_DEVICE_NUMA_NODE
- if ((adapter->num_q_vectors == (adapter->num_rx_queues +
- adapter->num_tx_queues)) &&
- (adapter->num_rx_queues == v_idx))
- adapter->node = orig_node;
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
-#endif /* HAVE_DEVICE_NUMA_NODE */
- q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
- adapter->node);
- if (!q_vector)
- q_vector = kzalloc(sizeof(struct igb_q_vector),
- GFP_KERNEL);
+ q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
#ifndef IGB_NO_LRO
if (v_idx < adapter->num_rx_queues) {
int size = sizeof(struct igb_lro_list);
- q_vector->lrolist = vzalloc_node(size, q_vector->numa_node);
- if (!q_vector->lrolist)
q_vector->lrolist = vzalloc(size);
if (!q_vector->lrolist)
goto err_out;
}
#endif /* IGB_NO_LRO */
}
-#ifdef HAVE_DEVICE_NUMA_NODE
- /* Restore the adapter's original node */
- adapter->node = orig_node;
-#endif /* HAVE_DEVICE_NUMA_NODE */
return 0;
err_out:
-#ifdef HAVE_DEVICE_NUMA_NODE
- /* Restore the adapter's original node */
- adapter->node = orig_node;
-#endif /* HAVE_DEVICE_NUMA_NODE */
igb_free_q_vectors(adapter);
return -ENOMEM;
}
**/
void igb_power_up_link(struct igb_adapter *adapter)
{
+ e1000_phy_hw_reset(&adapter->hw);
if (adapter->hw.phy.media_type == e1000_media_type_copper)
e1000_power_up_phy(&adapter->hw);
else
e1000_power_up_fiber_serdes_link(&adapter->hw);
- e1000_phy_hw_reset(&adapter->hw);
}
/**
/* since we reset the hardware DCA settings were cleared */
igb_setup_dca(adapter);
#endif
- igb_setup_tph(adapter);
}
void igb_reinit_locked(struct igb_adapter *adapter)
clear_bit(__IGB_RESETTING, &adapter->state);
}
-static void igb_setup_flex_filter(struct igb_adapter *adapter, int filter_id,
- int filter_len, u8 *filter, u8 *mask)
-{
- struct e1000_hw *hw = &adapter->hw;
- int i = 0, j, k;
- u32 fhft;
-
- while (i < filter_len) {
- for (j = 0; j < 8; j+= 4) {
- fhft = 0;
- for (k = 0; k < 4; k++)
- fhft |= ((u32)(filter[i + j + k])) << (k * 8);
- E1000_WRITE_REG_ARRAY(hw, E1000_FHFT(filter_id),
- (i/2) + (j/4), fhft);
- }
- E1000_WRITE_REG_ARRAY(hw, E1000_FHFT(filter_id),
- (i/2) + 2, mask[i/8]);
- i += 8;
- }
- E1000_WRITE_REG_ARRAY(hw, E1000_FHFT(filter_id),
- 63, filter_len);
- E1000_WRITE_FLUSH(hw);
-}
-
-static void igb_setup_flex_filter_wakeup(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u8 pattern[64];
- u8 mask[8];
-
- /* clear pattern to match and mask */
- memset(pattern, 0, 64);
- memset(mask, 0, 8);
-
- /*
- * This pattern is set to match on the following in a packet:
- * 0x00: xx xx xx xx xx xx xx xx xx xx xx xx 08 00 45 00
- * 0x10: xx xx xx xx xx xx xx 11 xx xx xx xx xx xx xx xx
- * 0x20: xx xx xx xx 00 07 00 86 xx xx ff ff ff ff ff ff
- * 0x30: m0 m1 m2 m3 m4 m5 xx xx xx xx xx xx xx xx xx xx
- *
- * Where m0-m5 are the 6 bytes of the mac address in network order
- */
-
- /* ethertype should be IP which is 0x8000 */
- pattern[0x0C] = 0x08;
- pattern[0x0D] = 0x00;
-
- /* verify IPv4 and header length 20 */
- pattern[0x0E] = 0x45;
- pattern[0x0F] = 0x00;
- mask[1] = 0xF0;
-
- /* verify L3 protocol is UDP */
- pattern[0x17] = 0x11;
- mask[2] = 0x80;
-
- /* verify source and destination port numbers */
- pattern[0x24] = 0x00;
- pattern[0x25] = 0x07;
- pattern[0x26] = 0x00;
- pattern[0x27] = 0x86;
- mask[4] = 0xF0;
-
- /* add start pattern of 6 bytes all 0xFF */
- memset(&pattern[0x2a], 0xff, 6);
- mask[5] = 0xFC;
-
- /* add mac address */
- memcpy(&pattern[0x30], hw->mac.addr, 6);
- mask[6] |= 0x3F;
-
- E1000_WRITE_REG(hw, E1000_WUC, 0);
- E1000_WRITE_REG(hw, E1000_WUFC, 0);
-
- igb_setup_flex_filter(adapter, 0, 64, pattern, mask);
-
- E1000_WRITE_REG(hw, E1000_WUC, E1000_WUC_PME_EN);
- E1000_WRITE_REG(hw, E1000_WUFC, E1000_WUFC_FLX0);
-}
void igb_reset(struct igb_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
struct e1000_mac_info *mac = &hw->mac;
struct e1000_fc_info *fc = &hw->fc;
- u32 pba = 0, tx_space, min_tx_space, min_rx_space;
- u16 hwm;
+ u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;
/* Repartition Pba for greater than 9k mtu
* To take effect CTRL.RST is required.
hwm = min(((pba << 10) * 9 / 10),
((pba << 10) - 2 * adapter->max_frame_size));
- fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
+ fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
fc->low_water = fc->high_water - 16;
fc->pause_time = 0xFFFF;
fc->send_xon = 1;
if (e1000_init_hw(hw))
dev_err(pci_dev_to_dev(pdev), "Hardware Error\n");
+ /*
+ * Flow control settings reset on hardware reset, so guarantee flow
+ * control is off when forcing speed.
+ */
+ if (!hw->mac.autoneg)
+ e1000_force_mac_fc(hw);
igb_init_dmac(adapter, pba);
- /* External thermal sensor support is limited to certain i350 devices */
- if (adapter->ets) {
- /* Re-initialize external thermal sensor interface */
+ /* Re-initialize the thermal sensor on i350 devices. */
+ if (mac->type == e1000_i350 && hw->bus.func == 0) {
+ /*
+ * If present, re-initialize the external thermal sensor
+ * interface.
+ */
+ if (adapter->ets)
e1000_set_i2c_bb(hw);
e1000_init_thermal_sensor_thresh(hw);
}
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
E1000_WRITE_REG(hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
- igb_setup_flex_filter_wakeup(adapter);
+#ifdef CONFIG_PTP
+ /* Re-enable PTP, where applicable. */
+ igb_ptp_reset(adapter);
+#endif /* CONFIG_PTP */
e1000_get_phy_info(hw);
}
#endif /* CONFIG_IGB_VMDQ_NETDEV */
/**
+ * igb_set_fw_version - Configure version string for ethtool
+ * @adapter: adapter struct
+ *
+ **/
+static void igb_set_fw_version(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_fw_version fw;
+
+ e1000_get_fw_version(hw, &fw);
+
+ switch (hw->mac.type) {
+ case e1000_i211:
+ snprintf(adapter->fw_version, sizeof(adapter->fw_version),
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor, fw.invm_img_type);
+ break;
+ default:
+ /* if option rom is valid, display its version too*/
+ if (fw.or_valid) {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.etrack_id,
+ fw.or_major, fw.or_build, fw.or_patch);
+ /* no option rom */
+ } else {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor, fw.etrack_id);
+ }
+ break;
+ }
+
+ return;
+}
+
+/**
* igb_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in igb_pci_tbl
/* Copper options */
if (hw->phy.media_type == e1000_media_type_copper) {
-#ifdef ETH_TP_MDI_X
- hw->phy.mdix = ETH_TP_MDI_INVALID;
-#else
hw->phy.mdix = AUTO_ALL_MODES;
-#endif /* ETH_TP_MDI_X */
hw->phy.disable_polarity_correction = FALSE;
hw->phy.ms_type = e1000_ms_hw_default;
}
netdev->features |= NETIF_F_SCTP_CSUM;
adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
+#ifdef DEBUG
+ if (adapter->dmac != IGB_DMAC_DISABLE)
+ printk("%s: DMA Coalescing is enabled..\n", netdev->name);
+#endif
/* before reading the NVM, reset the controller to put the device in a
* known good starting state */
igb_rar_set(adapter, 0);
/* get firmware version for ethtool -i */
- e1000_read_nvm(&adapter->hw, 5, 1, &adapter->fw_version);
+ igb_set_fw_version(adapter);
setup_timer(&adapter->watchdog_timer, &igb_watchdog,
(unsigned long) adapter);
if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
* enable the ACPI Magic Packet filter
*/
- if (hw->bus.func == 0)
- e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
- else if (hw->mac.type >= e1000_82580)
+ /* Check the NVM for wake support for non-port A ports */
+ if (hw->mac.type >= e1000_82580)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
&eeprom_data);
e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
if (eeprom_data & IGB_EEPROM_APME)
- adapter->eeprom_wol |= E1000_WUFC_MAG;
+ adapter->wol_supported = true;
/* now that we have the eeprom settings, apply the special cases where
* the eeprom may be wrong or the board simply won't support wake on
* lan on a particular port */
switch (pdev->device) {
case E1000_DEV_ID_82575GB_QUAD_COPPER:
- adapter->eeprom_wol = 0;
+ adapter->wol_supported = false;
break;
case E1000_DEV_ID_82575EB_FIBER_SERDES:
case E1000_DEV_ID_82576_FIBER:
/* Wake events only supported on port A for dual fiber
* regardless of eeprom setting */
if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1)
- adapter->eeprom_wol = 0;
+ adapter->wol_supported = false;
break;
case E1000_DEV_ID_82576_QUAD_COPPER:
case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
/* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
- adapter->eeprom_wol = 0;
+ adapter->wol_supported = false;
else
adapter->flags |= IGB_FLAG_QUAD_PORT_A;
/* Reset for multiple quad port adapters */
if (++global_quad_port_a == 4)
global_quad_port_a = 0;
break;
+ default:
+ /* For all other devices, support wake on port A */
+ if (hw->bus.func == 0)
+ adapter->wol_supported = true;
+ break;
}
/* initialize the wol settings based on the eeprom settings */
- adapter->wol = adapter->eeprom_wol;
- device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol);
+ if (adapter->wol_supported)
+ adapter->wol |= E1000_WUFC_MAG;
+
+ /* Some vendors want WoL disabled by default, but still supported */
+ if ((hw->mac.type == e1000_i350) &&
+ (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
+ adapter->wol_supported = true;
+ adapter->wol = 0;
+ }
+
+ device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev),
+ adapter->wol_supported);
/* reset the hardware with the new settings */
igb_reset(adapter);
((hw->bus.speed == e1000_bus_speed_2500) ? "2.5GT/s" :
(hw->bus.speed == e1000_bus_speed_5000) ? "5.0GT/s" :
"unknown"),
- ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4\n" :
- (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2\n" :
- (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1\n" :
+ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+ (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
+ (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
"unknown"));
dev_info(pci_dev_to_dev(pdev), "%s: MAC: ", netdev->name);
for (i = 0; i < 6; i++)
pba_str);
- /* External thermal sensor support is limited to certain i350 devices */
+ /* Initialize the thermal sensor on i350 devices. */
if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
u16 ets_word;
/*
- * Read the external sensor to determine if this i350 device
- * supports an external thermal sensor
+ * Read the NVM to determine if this i350 device supports an
+ * external thermal sensor.
*/
e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_word);
- if (ets_word != 0x0000 && ets_word != 0xFFFF) {
+ if (ets_word != 0x0000 && ets_word != 0xFFFF)
adapter->ets = true;
+ else
+ adapter->ets = false;
#ifdef IGB_SYSFS
- igb_sysfs_init(adapter);
+ igb_sysfs_init(adapter);
#else
#ifdef IGB_PROCFS
- igb_procfs_init(adapter);
+ igb_procfs_init(adapter);
#endif /* IGB_PROCFS */
#endif /* IGB_SYSFS */
- } else {
- adapter->ets = false;
- }
} else {
adapter->ets = false;
}
igb_ptp_stop(adapter);
#endif /* CONFIG_IGB_PTP */
- /*
- * The watchdog timer may be rescheduled, so explicitly
- * disable watchdog from being rescheduled.
- */
+ /* flush_scheduled work may reschedule our watchdog task, so
+ * explicitly disable watchdog tasks from being rescheduled */
set_bit(__IGB_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
- kfree(adapter->mac_table);
- kfree(adapter->shadow_vfta);
- free_netdev(netdev);
-
- pci_disable_pcie_error_reporting(pdev);
-
- pci_disable_device(pdev);
#ifdef IGB_SYSFS
igb_sysfs_exit(adapter);
igb_procfs_exit(adapter);
#endif /* IGB_PROCFS */
#endif /* IGB_SYSFS */
+ kfree(adapter->mac_table);
+ kfree(adapter->shadow_vfta);
+ free_netdev(netdev);
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
}
/**
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
- int orig_node = dev_to_node(dev);
int size;
size = sizeof(struct igb_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
- if (!tx_ring->tx_buffer_info)
tx_ring->tx_buffer_info = vzalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- set_dev_node(dev, tx_ring->numa_node);
- tx_ring->desc = dma_alloc_coherent(dev,
- tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL);
- set_dev_node(dev, orig_node);
- if (!tx_ring->desc)
- tx_ring->desc = dma_alloc_coherent(dev,
- tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- int orig_node = dev_to_node(dev);
int size, desc_len;
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
- if (!rx_ring->rx_buffer_info)
- rx_ring->rx_buffer_info = vzalloc(size);
+ rx_ring->rx_buffer_info = vzalloc(size);
if (!rx_ring->rx_buffer_info)
goto err;
rx_ring->size = rx_ring->count * desc_len;
rx_ring->size = ALIGN(rx_ring->size, 4096);
- set_dev_node(dev, rx_ring->numa_node);
- rx_ring->desc = dma_alloc_coherent(dev,
- rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL);
- set_dev_node(dev, orig_node);
- if (!rx_ring->desc)
- rx_ring->desc = dma_alloc_coherent(dev,
- rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc)
goto err;
/* enable LPE to prevent packets larger than max_frame_size */
rctl |= E1000_RCTL_LPE;
- /* enable store bad packets for SV driver only */
- rctl |= E1000_RCTL_SBP;
- /* initalize counter for other SV stuff */
- adapter->count = 0;
/* disable queue 0 to prevent tail write w/o re-config */
E1000_WRITE_REG(hw, E1000_RXDCTL(0), 0);
srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-#endif
+#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+ srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
+ E1000_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
#ifdef CONFIG_PTP
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
if (hw->mac.type == e1000_i350) {
thstat = E1000_READ_REG(hw, E1000_THSTAT);
ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
- printk("%s: Checking for Thermal Status..thstat=%x \n",
- netdev->name, thstat);
- printk("%s: Thermal Status..ctrl_ext=%x \n",
- netdev->name, ctrl_ext);
if ((hw->phy.media_type ==
e1000_media_type_copper) &&
!(ctrl_ext &
#ifdef CONFIG_PTP
/* set timestamp bit if present */
- if (tx_flags & IGB_TX_FLAGS_TSTAMP)
+ if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP))
cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
#endif /* CONFIG_IGB_PTP */
struct igb_ring *tx_ring)
{
#ifdef CONFIG_PTP
- struct igb_adapter *adapter = tx_ring->q_vector->adapter;
-#endif /* CONFIG_IGB_PTP */
+ struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
+#endif /* CONFIG_PTP */
struct igb_tx_buffer *first;
int tso;
u32 tx_flags = 0;
adapter->max_frame_size = max_frame;
#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
-#ifdef IGB_PER_PKT_TIMESTAMP
- if (adapter->hw.mac.type >= e1000_82580)
- max_frame += IGB_TS_HDR_LEN;
-
-#endif
/*
* RLPML prevents us from receiving a frame larger than max_frame so
* it is safe to just set the rx_buffer_len to max_frame without the
adapter->stats.prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
adapter->stats.symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
adapter->stats.sec += E1000_READ_REG(hw, E1000_SEC);
- adapter->dmac_entries += E1000_READ_REG(hw, E1000_DMACDC);
mpc = E1000_READ_REG(hw, E1000_MPC);
adapter->stats.mpc += mpc;
reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
adapter->stats.rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
+ /* this stat has invalid values on i210/i211 */
+ if ((hw->mac.type != e1000_i210) &&
+ (hw->mac.type != e1000_i211))
adapter->stats.tncrs += E1000_READ_REG(hw, E1000_TNCRS);
}
if (icr & E1000_ICR_TS) {
u32 tsicr = E1000_READ_REG(hw, E1000_TSICR);
- if (tsicr & E1000_TSICR_TXTS)
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* acknowledge the interrupt */
+ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS);
/* retrieve hardware timestamp */
- igb_ptp_tx_hwtstamp(adapter);
+ schedule_work(&adapter->ptp_tx_work);
+ }
}
#endif /* CONFIG_PTP */
/* Check for MDD event */
return IRQ_HANDLED;
}
-static void igb_update_tph(struct igb_q_vector *q_vector)
-{
- struct igb_adapter *adapter = q_vector->adapter;
- struct e1000_hw *hw = &adapter->hw;
- int cpu = get_cpu();
- int tag = ~(cpu / 8) & 0x7; // JKT 2-socket system
-
- if (q_vector->cpu == cpu)
- goto out_no_update;
-
- if (q_vector->tx.ring) {
- int q = q_vector->tx.ring->reg_idx;
- u32 dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(q));
- dca_txctrl &= ~E1000_TPH_TXCTRL_CPUID_MASK;
- dca_txctrl |= tag << E1000_DCA_TXCTRL_CPUID_SHIFT_82576;
-// dca_txctrl |= E1000_TPH_TXCTRL_FTCH_DCA_EN;
- dca_txctrl |= E1000_TPH_TXCTRL_DESC_DCA_EN;
-// dca_txctrl |= E1000_TPH_TXCTRL_DATA_DCA_EN;
-// dca_txctrl |= E1000_TPH_TXCTRL_AUTOLEARN_EN;
- E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(q), dca_txctrl);
- }
- if (q_vector->rx.ring) {
- int q = q_vector->rx.ring->reg_idx;
- u32 dca_rxctrl = E1000_READ_REG(hw, E1000_DCA_RXCTRL(q));
- dca_rxctrl &= ~E1000_TPH_RXCTRL_CPUID_MASK;
- dca_rxctrl |= tag << E1000_DCA_RXCTRL_CPUID_SHIFT_82576;
-// dca_rxctrl |= E1000_TPH_RXCTRL_FTCH_DCA_EN;
- dca_rxctrl |= E1000_TPH_RXCTRL_DESC_DCA_EN;
- dca_rxctrl |= E1000_TPH_RXCTRL_HEAD_DCA_EN;
- dca_rxctrl |= E1000_TPH_RXCTRL_DATA_DCA_EN;
-// dca_rxctrl |= E1000_TPH_RXCTRL_AUTOLEARN_EN;
- E1000_WRITE_REG(hw, E1000_DCA_RXCTRL(q), dca_rxctrl);
- }
- q_vector->cpu = cpu;
-out_no_update:
- put_cpu();
-}
-
-static void igb_setup_tph(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- int i;
-
- /* Disable DCA, enable TPH read hint mode, and set TPH Data PH mode */
- E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_DISABLE | E1000_DCA_CTRL_TPH_DATA_PH);
-
- for (i = 0; i < adapter->num_q_vectors; i++) {
- adapter->q_vector[i]->cpu = -1;
- igb_update_tph(adapter->q_vector[i]);
- }
-}
static void igb_write_itr(struct igb_q_vector *q_vector)
{
struct igb_adapter *adapter = q_vector->adapter;
if (icr & E1000_ICR_TS) {
u32 tsicr = E1000_READ_REG(hw, E1000_TSICR);
- if (tsicr & E1000_TSICR_TXTS)
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* acknowledge the interrupt */
+ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS);
/* retrieve hardware timestamp */
- igb_ptp_tx_hwtstamp(adapter);
+ schedule_work(&adapter->ptp_tx_work);
+ }
}
-#endif /* CONFIG_PTP */
+#endif /* CONFIG_IGB_PTP */
napi_schedule(&q_vector->napi);
if (icr & E1000_ICR_TS) {
u32 tsicr = E1000_READ_REG(hw, E1000_TSICR);
- if (tsicr & E1000_TSICR_TXTS)
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* acknowledge the interrupt */
+ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS);
/* retrieve hardware timestamp */
- igb_ptp_tx_hwtstamp(adapter);
+ schedule_work(&adapter->ptp_tx_work);
+ }
}
#endif /* CONFIG_PTP */
/* free the skb */
dev_kfree_skb_any(tx_buffer->skb);
- tx_buffer->skb = NULL;
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets;
+#ifdef DEBUG
+ if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags) &&
+ !(adapter->disable_hw_reset && adapter->tx_hang_detected)) {
+#else
if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
+#endif
struct e1000_hw *hw = &adapter->hw;
eop_desc = tx_buffer->next_to_watch;
E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
+#ifdef DEBUG
+ adapter->tx_hang_detected = TRUE;
+ if (adapter->disable_hw_reset) {
+ DPRINTK(DRV, WARNING,
+ "Deactivating netdev watchdog timer\n");
+ if (del_timer(&netdev_ring(tx_ring)->watchdog_timer))
+ dev_put(netdev_ring(tx_ring));
+#ifndef HAVE_NET_DEVICE_OPS
+ netdev_ring(tx_ring)->tx_timeout = NULL;
+#endif
+ }
+#endif /* DEBUG */
dev_err(tx_ring->dev,
"Detected Tx Unit Hang\n"
" Tx Queue <%d>\n"
if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
E1000_RXD_STAT_UDPCS))
skb->ip_summed = CHECKSUM_UNNECESSARY;
- ring->rx_stats.csum_good++;
}
#ifdef NETIF_F_RXHASH
if (netdev_ring(ring)->features & NETIF_F_RXHASH)
skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}
-#endif
+#endif
static void igb_rx_vlan(struct igb_ring *ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
skb->len += length;
skb->data_len += length;
- skb->truesize += length;
+ skb->truesize += PAGE_SIZE / 2;
if ((page_count(buffer_info->page) != 1) ||
(page_to_nid(buffer_info->page) != current_node))
igb_rx_checksum(rx_ring, rx_desc, skb);
igb_rx_vlan(rx_ring, rx_desc, skb);
- if (rx_desc->wb.lower.lo_dword.hs_rss.hdr_info &
- cpu_to_le16(E1000_RXDADV_SPH))
- rx_ring->rx_stats.rx_hdr_split++;
- if (igb_test_staterr(rx_desc, E1000_RXDADV_ERR_HBO))
- printk("igb_rx:HBO bit set..\n");
- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_DYNINT))
- rx_ring->rx_stats.lli_int++;
- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_PIF))
- rx_ring->rx_stats.pif_count++;
total_bytes += skb->len;
total_packets++;
default:
return -EOPNOTSUPP;
}
- return 0;
+ return E1000_SUCCESS;
}
#endif
/**
case SIOCSHWTSTAMP:
return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
#endif /* CONFIG_IGB_PTP */
+#ifdef ETHTOOL_OPS_COMPAT
+ case SIOCETHTOOL:
+ return ethtool_ioctl(ifr);
+#endif
default:
return -EOPNOTSUPP;
}
#endif
if (enable) {
+ /* enable VLAN tag insert/strip */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_VME;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
/* Disable CFI check */
rctl = E1000_READ_REG(hw, E1000_RCTL);
mac->autoneg = 0;
- /* Fiber NIC's only allow 1000 gbps Full duplex */
- if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes ) &&
- spddplx != (SPEED_1000 + DUPLEX_FULL)) {
+ /*
+ * Fiber NIC's only allow 1000 Mbps Full duplex
+ * and 100Mbps Full duplex for 100baseFx sfp
+ */
+ if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)
+ if (spddplx != (SPEED_1000 + DUPLEX_FULL) &&
+ spddplx != (SPEED_100 + DUPLEX_FULL)) {
dev_err(pci_dev_to_dev(pdev),
"Unsupported Speed/Duplex configuration\n");
return -EINVAL;
dev_err(pci_dev_to_dev(pdev), "Unsupported Speed/Duplex configuration\n");
return -EINVAL;
}
+ /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+ adapter->hw.phy.mdix = AUTO_ALL_MODES;
return 0;
}
#endif
netif_device_detach(netdev);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_LU)
+ wufc &= ~E1000_WUFC_LNKC;
if (netif_running(netdev))
__igb_close(netdev, true);
return retval;
#endif
- status = E1000_READ_REG(hw, E1000_STATUS);
- if (status & E1000_STATUS_LU)
- wufc &= ~E1000_WUFC_LNKC;
-
if (wufc) {
igb_setup_rctl(adapter);
igb_set_rx_mode(netdev);
#ifdef CONFIG_PM
#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
static int igb_suspend(struct device *dev)
+#else
+static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
{
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
+ struct pci_dev *pdev = to_pci_dev(dev);
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
int retval;
bool wake;
- struct pci_dev *pdev = to_pci_dev(dev);
retval = __igb_shutdown(pdev, &wake, 0);
if (retval)
return 0;
}
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
static int igb_resume(struct device *dev)
+#else
+static int igb_resume(struct pci_dev *pdev)
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
{
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
struct pci_dev *pdev = to_pci_dev(dev);
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
}
#ifdef CONFIG_PM_RUNTIME
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
static int igb_runtime_idle(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
{
return igb_resume(dev);
}
-#endif /* CONFIG_PM_RUNTIME */
#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
+#endif /* CONFIG_PM_RUNTIME */
#endif /* CONFIG_PM */
#ifdef USE_REBOOT_NOTIFIER
switch (hw->mac.type) {
case e1000_82575:
- case e1000_i210:
- case e1000_i211:
default:
/* replication is not supported for 82575 */
return;
e1000_vmdq_set_loopback_pf(hw, adapter->vfs_allocated_count ||
adapter->vmdq_pools);
- e1000_vmdq_set_anti_spoofing_pf(hw, adapter->vfs_allocated_count ||
- adapter->vmdq_pools,
+ e1000_vmdq_set_anti_spoofing_pf(hw,
+ adapter->vfs_allocated_count || adapter->vmdq_pools,
adapter->vfs_allocated_count);
e1000_vmdq_set_replication_pf(hw, adapter->vfs_allocated_count ||
adapter->vmdq_pools);
mask = E1000_SWFW_EEP_SM;
else
mask = E1000_SWFW_PHY0_SM;
+ /* i211 parts do not support this feature */
+ if (hw->mac.type == e1000_i211)
+ hw->mac.arc_subsystem_valid = false;
if (!hw->mac.ops.acquire_swfw_sync(hw, mask)) {
for (i = 0; i <= FW_MAX_RETRIES; i++) {
fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CMD_RESERVED;
fw_cmd.port_num = hw->bus.func;
fw_cmd.drv_version = FW_FAMILY_DRV_VER;
+ fw_cmd.hdr.checksum = 0;
fw_cmd.hdr.checksum = e1000_calculate_checksum((u8 *)&fw_cmd,
(FW_HDR_LEN +
fw_cmd.hdr.buf_len));
u32 dmac_thr;
u16 hwm;
+ if (hw->mac.type == e1000_i211)
+ return;
+
if (hw->mac.type > e1000_82580) {
if (adapter->dmac != IGB_DMAC_DISABLE) {
u32 reg;
/* watchdog timer= msec values in 32usec intervals */
reg |= ((adapter->dmac) >> 5);
+ /*
+ * Disable BMC-to-OS Watchdog enable
+ * on devices that support OS-to-BMC
+ */
+ reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
E1000_WRITE_REG(hw, E1000_DMACR, reg);
/* no lower threshold to disable coalescing(smart fifb)-UTRESH=0*/
ring->queue_index = req.queue;
ring->dev = pci_dev_to_dev(adapter->pdev);
ring->netdev = adapter->netdev;
- ring->numa_node = adapter->node;
adapter->tx_ring[req.queue] = ring;
err = igb_setup_tx_resources(adapter->tx_ring[req.queue]);
*
* Default Value: 1
*/
-IGB_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues (0-8), default 1=number of cpus");
+IGB_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues (0-8), default 1, 0=number of cpus");
#define DEFAULT_RSS 1
#define MAX_RSS 8
IGB_PARAM(MDD, "Malicious Driver Detection (0/1), default 1 = enabled. "
"Only available when max_vfs is greater than 0");
+#ifdef DEBUG
+
+/* Disable Hardware Reset on Tx Hang
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled, i.e. h/w will reset)
+ */
+IGB_PARAM(DisableHwReset, "Disable reset of hardware on Tx hang");
+
+/* Dump Transmit and Receive buffers
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0
+ */
+IGB_PARAM(DumpBuffers, "Dump Tx/Rx buffers on Tx hang or by request");
+
+#endif /* DEBUG */
/* QueuePairs (Enable TX/RX queue pairs for interrupt handling)
*
*
* Default Value: 1
*/
-IGB_PARAM(QueuePairs, "Enable TX/RX queue pairs for interrupt handling (0,1), default 1=on");
+IGB_PARAM(QueuePairs, "Enable Tx/Rx queue pairs for interrupt handling (0,1), default 1=on");
#define DEFAULT_QUEUE_PAIRS 1
#define MAX_QUEUE_PAIRS 1
.arg = { .r = { .min = MIN_RSS,
.max = MAX_RSS } }
};
-
- if (adapter->vmdq_pools) {
switch (hw->mac.type) {
-#ifndef CONFIG_IGB_VMDQ_NETDEV
- case e1000_82576:
- opt.arg.r.max = 2;
- break;
case e1000_82575:
+#ifndef CONFIG_IGB_VMDQ_NETDEV
+ if (!!adapter->vmdq_pools) {
+ if (adapter->vmdq_pools <= 2) {
if (adapter->vmdq_pools == 2)
opt.arg.r.max = 3;
- if (adapter->vmdq_pools <= 2)
- break;
-#endif
- default:
+ } else {
opt.arg.r.max = 1;
- break;
}
+ } else {
+ opt.arg.r.max = 4;
}
- switch (hw->mac.type) {
- case e1000_i211:
- opt.arg.r.max = 2;
+#else
+ opt.arg.r.max = !!adapter->vmdq_pools ? 1 : 4;
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
break;
- case e1000_82575:
case e1000_i210:
opt.arg.r.max = 4;
break;
+ case e1000_i211:
+ opt.arg.r.max = 2;
+ break;
+ case e1000_82576:
+#ifndef CONFIG_IGB_VMDQ_NETDEV
+ if (!!adapter->vmdq_pools)
+ opt.arg.r.max = 2;
+ break;
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+ case e1000_82580:
+ case e1000_i350:
default:
+ if (!!adapter->vmdq_pools)
+ opt.arg.r.max = 1;
break;
}
}
#endif
}
- { /* QueuePairs - Enable TX/RX queue pairs for interrupt handling */
+ { /* QueuePairs - Enable Tx/Rx queue pairs for interrupt handling */
struct igb_option opt = {
.type = enable_option,
- .name = "QueuePairs - TX/RX queue pairs for interrupt handling",
+ .name = "QueuePairs - Tx/Rx queue pairs for interrupt handling",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
#endif
unsigned int qp = QueuePairs[bd];
/*
- * we must enable queue pairs if the number of queues
- * exceeds the number of avaialble interrupts. We are
- * limited to 10, or 3 per unallocated vf.
+ * We must enable queue pairs if the number of queues
+ * exceeds the number of available interrupts. We are
+ * limited to 10, or 3 per unallocated vf. On I210 and
+ * I211 devices, we are limited to 5 interrupts.
+ * However, since I211 only supports 2 queues, we do not
+ * need to check and override the user option.
*/
- if ((adapter->rss_queues > 4) ||
- (adapter->vmdq_pools > 4) ||
- ((adapter->rss_queues > 1) &&
- ((adapter->vmdq_pools > 3) ||
- (adapter->vfs_allocated_count > 6)))) {
if (qp == OPTION_DISABLED) {
+ if (adapter->rss_queues > 4)
qp = OPTION_ENABLED;
- DPRINTK(PROBE, INFO,
- "Number of queues exceeds available interrupts, %s\n",opt.err);
- }
+ if (adapter->vmdq_pools > 4)
+ qp = OPTION_ENABLED;
+
+ if (adapter->rss_queues > 1 &&
+ (adapter->vmdq_pools > 3 ||
+ adapter->vfs_allocated_count > 6))
+ qp = OPTION_ENABLED;
+
+ if (hw->mac.type == e1000_i210 &&
+ adapter->rss_queues > 2)
+ qp = OPTION_ENABLED;
+
+ if (qp == OPTION_ENABLED)
+ DPRINTK(PROBE, INFO, "Number of queues exceeds available interrupts, %s\n",
+ opt.err);
}
igb_validate_option(&qp, &opt, adapter);
adapter->flags |= qp ? IGB_FLAG_QUEUE_PAIRS : 0;
#endif
}
#endif /* IGB_NO_LRO */
- { /* Node assignment */
- static struct igb_option opt = {
- .type = range_option,
- .name = "Node to start on",
- .err = "defaulting to -1",
-#ifdef HAVE_EARLY_VMALLOC_NODE
- .def = 0,
-#else
- .def = -1,
-#endif
- .arg = { .r = { .min = 0,
- .max = (MAX_NUMNODES - 1)}}
- };
- int node_param = opt.def;
-
- /* if the default was zero then we need to set the
- * default value to an online node, which is not
- * necessarily zero, and the constant initializer
- * above can't take first_online_node */
- if (node_param == 0)
- /* must set opt.def for validate */
- opt.def = node_param = first_online_node;
-
-#ifdef module_param_array
- if (num_Node > bd) {
-#endif
- node_param = Node[bd];
- igb_validate_option((uint *)&node_param, &opt, adapter);
-
- if (node_param != OPTION_UNSET) {
- DPRINTK(PROBE, INFO, "node set to %d\n", node_param);
- }
-#ifdef module_param_array
- }
-#endif
-
- /* check sanity of the value */
- if (node_param != -1 && !node_online(node_param)) {
- DPRINTK(PROBE, INFO,
- "ignoring node set to invalid value %d\n",
- node_param);
- node_param = opt.def;
- }
-
- adapter->node = node_param;
- }
{ /* MDD - Enable Malicious Driver Detection. Only available when
SR-IOV is enabled. */
struct igb_option opt = {
return false;
hw = &adapter->hw;
- status = e1000_set_i2c_bb(hw);
- if (status != E1000_SUCCESS)
- return false;
+ /*
+ * Only set I2C bit-bang mode if an external thermal sensor is
+ * supported on this device.
+ */
+ if (adapter->ets) {
+ status = e1000_set_i2c_bb(hw);
+ if (status != E1000_SUCCESS)
+ return false;
+ }
status = hw->mac.ops.init_thermal_sensor_thresh(hw);
if (status != E1000_SUCCESS)
if (adapter == NULL)
return snprintf(page, count, "error: no adapter\n");
- return snprintf(page, count, "%d.%d-%d\n",
- (adapter->fw_version & 0xF000) >> 12,
- (adapter->fw_version & 0x0FF0) >> 4,
- adapter->fw_version & 0x000F);
+ return snprintf(page, count, "0x%08x\n", adapter->etrack_id);
}
static int igb_numeports(char *page, char **start, off_t off, int count,
-/*
- * PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580
- *
- * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
-#ifdef CONFIG_PTP
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*******************************************************************************/
+
+/******************************************************************************
+ Copyright(c) 2011 Richard Cochran <richardcochran@gmail.com> for some of the
+ 82576 and 82580 code
+******************************************************************************/
+
+#include "igb.h"
+
+#ifdef CONFIG_IGB_PTP
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
-#include "igb.h"
#define INCVALUE_MASK 0x7fffffff
#define ISGN 0x80000000
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_ALL:
- /*
- * register TSYNCRXCFG must be set, therefore it is not
- * possible to time stamp both Sync and Delay_Req messages
- * => fall back to time stamping all packets
- */
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
is_l4 = true;
break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
is_l2 = true;
is_l4 = true;
break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_ALL:
+ /*
+ * 82576 cannot timestamp all packets, which it needs to do to
+ * support both V1 Sync and Delay_Req messages
+ */
+ if (hw->mac.type != e1000_82576) {
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+ /* fall through */
default:
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ is_l2 = true;
+ is_l4 = true;
if ((hw->mac.type == e1000_i210) ||
(hw->mac.type == e1000_i211)) {
adapter->cc.mult = 1;
adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
/* Dial the nominal frequency. */
- E1000_WRITE_REG(hw,E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
+ E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 |
+ INCVALUE_82576);
break;
case e1000_82580:
case e1000_i350:
}
E1000_WRITE_FLUSH(hw);
+ spin_lock_init(&adapter->tmreg_lock);
+ INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
/* Initialize the clock and overflow work for devices that need it. */
if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
igb_ptp_overflow_check);
- spin_lock_init(&adapter->tmreg_lock);
-
- if (hw->mac.type == e1000_82576)
- INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
schedule_delayed_work(&adapter->ptp_overflow_work,
IGB_SYSTIM_OVERFLOW_PERIOD);
} else {
dev_info(&adapter->pdev->dev, "added PHC on %s\n",
adapter->netdev->name);
+ adapter->flags |= IGB_FLAG_PTP;
}
}
{
switch (adapter->hw.mac.type) {
case e1000_82576:
- cancel_work_sync(&adapter->ptp_tx_work);
- /* fall through */
case e1000_82580:
case e1000_i350:
cancel_delayed_work_sync(&adapter->ptp_overflow_work);
break;
case e1000_i210:
case e1000_i211:
- /* No work to cancel, but we need to unregister the clock. */
+ /* No delayed work to cancel. */
+ break;
default:
return;
}
+ cancel_work_sync(&adapter->ptp_tx_work);
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
adapter->netdev->name);
+ adapter->flags &= ~IGB_FLAG_PTP;
}
}
-#endif
+/**
+ * igb_ptp_reset - Re-enable the adapter for PTP following a reset.
+ * @adapter: Board private structure.
+ *
+ * This function handles the reset work required to re-enable the PTP device.
+ **/
+void igb_ptp_reset(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (!(adapter->flags & IGB_FLAG_PTP))
+ return;
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ /* Dial the nominal frequency. */
+ E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 |
+ INCVALUE_82576);
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ /* Enable the timer functions and interrupts. */
+ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
+ E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS);
+ E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS);
+ break;
+ default:
+ /* No work to do. */
+ return;
+ }
+
+ /* Re-initialize the timer. */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
+ struct timespec ts = ktime_to_timespec(ktime_get_real());
+
+ igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
+ } else {
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+ }
+}
+#endif /* CONFIG_IGB_PTP */
{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
/* RDH is read-only for i210, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 },
{ E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
{ E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
if (adapter == NULL)
return false;
+ /*
+ * Only set I2C bit-bang mode if an external thermal sensor is
+ * supported on this device.
+ */
+ if (adapter->ets) {
status = e1000_set_i2c_bb(&(adapter->hw));
if (status != E1000_SUCCESS)
return false;
+ }
status = e1000_init_thermal_sensor_thresh(&(adapter->hw));
if (status != E1000_SUCCESS)
struct kobj_attribute *attr, char *buf)
{
struct igb_adapter *adapter = igb_get_adapter(kobj);
- u16 nvm_ver;
if (adapter == NULL)
return snprintf(buf, PAGE_SIZE, "error: no adapter\n");
- nvm_ver = adapter->fw_version;
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", nvm_ver);
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", adapter->etrack_id);
}
static ssize_t igb_numeports(struct kobject *kobj,
if (hw == NULL)
return snprintf(buf, PAGE_SIZE, "error: no hw data\n");
- /* CMW taking the original out so and assigning ports generally
+ /* CMW taking the original out so assigning ports generally
* by mac type for now. Want to have the daemon handle this some
* other way due to the variability of the 1GB parts.
*/
return snprintf(buf, PAGE_SIZE,
"error: invalid sensor name %s\n", kobj->name);
- if (idx >= E1000_MAX_SENSORS)
- return snprintf(buf, PAGE_SIZE,
- "error: invalid sensor name %s\n", kobj->name);
-
return snprintf(buf, PAGE_SIZE, "%d\n",
adapter->hw.mac.thermal_sensor_data.sensor[idx].location);
}
return snprintf(buf, PAGE_SIZE, "error: status %d returned",
status);
- if (NULL == adapter)
- return snprintf(buf, PAGE_SIZE, "error: failed to map adapter from kobj");
idx = igb_name_to_idx(kobj->name);
if (idx == -1)
return snprintf(buf, PAGE_SIZE,
"error: invalid sensor name %s\n", kobj->name);
- if (idx >= E1000_MAX_SENSORS)
- return snprintf(buf, PAGE_SIZE,
- "error: invalid sensor name %s\n", kobj->name);
-
return snprintf(buf, PAGE_SIZE, "%d\n",
adapter->hw.mac.thermal_sensor_data.sensor[idx].temp);
}
return snprintf(buf, PAGE_SIZE,
"error: invalid sensor name %s\n", kobj->name);
- if (idx >= E1000_MAX_SENSORS)
- return snprintf(buf, PAGE_SIZE,
- "error: invalid sensor name %s\n", kobj->name);
-
return snprintf(buf, PAGE_SIZE, "%d\n",
adapter->hw.mac.thermal_sensor_data.sensor[idx].max_op_thresh);
}
return ret;
}
+#ifndef do_div
+#if BITS_PER_LONG == 32
+/*
+ * _kc__div64_32 - 64-bit by 32-bit unsigned division for 32-bit kernels
+ * @n: in/out: dividend on entry, quotient on exit
+ * @base: 32-bit divisor (must be non-zero; division by zero is undefined)
+ *
+ * Returns the 32-bit remainder of *n / base.  This is a backport of the
+ * kernel's __div64_32() shift-and-subtract implementation used by do_div()
+ * when the kernel does not already provide one; declared weak so that a
+ * kernel-supplied symbol takes precedence at link time.
+ */
+uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base)
+{
+ uint64_t rem = *n;
+ uint64_t b = base;
+ uint64_t res, d = 1;
+ uint32_t high = rem >> 32;
+
+ /* Reduce the thing a bit first */
+ res = 0;
+ if (high >= base) {
+ high /= base;
+ res = (uint64_t) high << 32;
+ rem -= (uint64_t) (high*base) << 32;
+ }
+
+ /* Align divisor with the remaining dividend; stop before b's sign bit */
+ while ((int64_t)b > 0 && b < rem) {
+ b = b+b;
+ d = d+d;
+ }
+
+ /* Long division: subtract the shifted divisor, accumulating quotient bits in d */
+ do {
+ if (rem >= b) {
+ rem -= b;
+ res += d;
+ }
+ b >>= 1;
+ d >>= 1;
+ } while (d);
+
+ *n = res;
+ return rem;
+}
+#endif /* BITS_PER_LONG == 32 */
+#endif /* do_div */
#endif /* 2.6.0 => 2.4.6 */
/*****************************************************************************/
out:
return err;
}
+#endif /* < 2.6.28 */
-void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
- int off, int size)
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+int _kc_pci_num_vf(struct pci_dev *dev)
{
- skb_fill_page_desc(skb, i, page, off, size);
- skb->len += size;
- skb->data_len += size;
- skb->truesize += size;
+ int num_vf = 0;
+#ifdef CONFIG_PCI_IOV
+ struct pci_dev *vfdev;
+
+ /* loop through all ethernet devices starting at PF dev */
+ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL);
+ while (vfdev) {
+ if (vfdev->is_virtfn && vfdev->physfn == dev)
+ num_vf++;
+
+ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev);
}
-#endif /* < 2.6.28 */
+#endif
+ return num_vf;
+}
+#endif /* RHEL_RELEASE_CODE */
+#endif /* < 2.6.34 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
#ifdef HAVE_TX_MQ
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
#ifndef CONFIG_NETDEVICES_MULTIQUEUE
void _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
}
}
#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
#endif /* HAVE_TX_MQ */
#endif /* < 2.6.35 */
#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
#endif /* < 2.6.39 */
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
+#endif /* < 3.4.0 */
#define __GFP_COLD 0
#endif
+#ifndef __GFP_COMP
+#define __GFP_COMP 0
+#endif
/*****************************************************************************/
/* Installations with ethtool version without eeprom, adapter id, or statistics
* support */
pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
#define dev_warn(dev, fmt, args...) \
pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
+#define dev_notice(dev, fmt, args...) \
+ pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args)
/* NOTE: dangerous! we ignore the 'gfp' argument */
#define dma_alloc_coherent(dev,sz,dma,gfp) \
#define dma_unmap_single(dev,a,b,c) \
pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
+/* Route the generic DMA scatter/gather API to the legacy pci_* helpers,
+ * matching the dma_{map,unmap}_single wrappers above.  Both expansions
+ * need the closing parenthesis of the pci_*_sg() call. */
+#define dma_map_sg(dev, sg, nents, dir) \
+	pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
+#define dma_unmap_sg(dev, sg, nents, dir) \
+	pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
#define dma_sync_single(dev,a,b,c) \
pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
extern size_t _kc_strlcpy(char *dest, const char *src, size_t size);
#endif /* strlcpy */
+#ifndef do_div
+#if BITS_PER_LONG == 64
+# define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ __rem = ((uint64_t)(n)) % __base; \
+ (n) = ((uint64_t)(n)) / __base; \
+ __rem; \
+ })
+#elif BITS_PER_LONG == 32
+extern uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor);
+# define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ if (likely(((n) >> 32) == 0)) { \
+ __rem = (uint32_t)(n) % __base; \
+ (n) = (uint32_t)(n) / __base; \
+ } else \
+ __rem = _kc__div64_32(&(n), __base); \
+ __rem; \
+ })
+#else /* BITS_PER_LONG == ?? */
+# error do_div() does not yet support the C64
+#endif /* BITS_PER_LONG */
+#endif /* do_div */
#endif /* 2.6.0 => 2.5.28 */
/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )
+#define dma_pool pci_pool
+#define dma_pool_destroy pci_pool_destroy
+#define dma_pool_alloc pci_pool_alloc
+#define dma_pool_free pci_pool_free
+
+#define dma_pool_create(name,dev,size,align,allocation) \
+ pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation))
+#endif /* < 2.6.3 */
+
+/*****************************************************************************/
/* 2.6.4 => 2.6.0 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
#undef node_online_map
#define node_online_map _kcompat_node_online_map
+#define pci_get_class pci_find_class
#endif /* < 2.6.10 */
/*****************************************************************************/
#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
+#define SUPPORTED_Pause (1 << 13)
+#define SUPPORTED_Asym_Pause (1 << 14)
#define ADVERTISED_Pause (1 << 13)
#define ADVERTISED_Asym_Pause (1 << 14)
#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
#ifdef CONFIG_X86_64
-#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \
- dma_sync_single_for_cpu(dev, dma_handle, size, dir)
-#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
- dma_sync_single_for_device(dev, dma_handle, size, dir)
+#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \
+ dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir))
+#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \
+ dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir))
#endif
#endif
#endif /* < 2.6.14 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
+#ifndef dev_notice
+#define dev_notice(dev, fmt, args...) \
+ dev_printk(KERN_NOTICE, dev, fmt, ## args)
+#endif
#ifndef first_online_node
#define first_online_node 0
#endif
#ifndef DIV_ROUND_UP
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#endif
+#ifndef __ALIGN_MASK
+#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#endif
#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
#if (!((RHEL_RELEASE_CODE && \
((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
PCI_ANY_ID, PCI_ANY_ID, 0, 0
#endif
+#ifndef PCI_VENDOR_ID_INTEL
+#define PCI_VENDOR_ID_INTEL 0x8086
+#endif
#ifndef round_jiffies
#define round_jiffies(x) x
#endif
#ifndef KERN_CONT
#define KERN_CONT ""
#endif
+#ifndef pr_err
+#define pr_err(fmt, arg...) \
+ printk(KERN_ERR fmt, ##arg)
+#endif
#else /* < 2.6.24 */
#define HAVE_ETHTOOL_GET_SSET_COUNT
#define HAVE_NETDEV_NAPI_LIST
}
#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
#endif
-#ifndef skb_add_rx_frag
-#define skb_add_rx_frag _kc_skb_add_rx_frag
-extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int);
-#endif
#endif /* < 2.6.28 */
/*****************************************************************************/
#define netdev_for_each_uc_addr(uclist, dev) \
for (uclist = dev->uc_list; uclist; uclist = uclist->next)
#endif
-#else
+#ifndef PORT_OTHER
+#define PORT_OTHER 0xff
+#endif
+#ifndef MDIO_PHY_ID_PRTAD
+#define MDIO_PHY_ID_PRTAD 0x03e0
+#endif
+#ifndef MDIO_PHY_ID_DEVAD
+#define MDIO_PHY_ID_DEVAD 0x001f
+#endif
+#else /* < 2.6.31 */
#ifndef HAVE_NETDEV_STORAGE_ADDRESS
#define HAVE_NETDEV_STORAGE_ADDRESS
#endif
#ifndef HAVE_TRANS_START_IN_QUEUE
#define HAVE_TRANS_START_IN_QUEUE
#endif
+#ifndef HAVE_INCLUDE_LINUX_MDIO_H
+#define HAVE_INCLUDE_LINUX_MDIO_H
+#endif
#endif /* < 2.6.31 */
/*****************************************************************************/
#ifndef __percpu
#define __percpu
#endif /* __percpu */
+#ifndef PORT_DA
+#define PORT_DA PORT_OTHER
+#endif
+#ifndef PORT_NONE
+#define PORT_NONE PORT_OTHER
+#endif
+
+#if ((RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))
+#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE)
+#undef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
+#undef DEFINE_DMA_UNMAP_LEN
+#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
+#undef dma_unmap_addr
+#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
+#undef dma_unmap_addr_set
+#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
+#undef dma_unmap_len
+#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
+#undef dma_unmap_len_set
+#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
+#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */
+#endif /* RHEL_RELEASE_CODE */
#else /* < 2.6.33 */
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
#define HAVE_NETDEV_OPS_FCOE_GETWWN
#endif
#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
-#define HAVE_ETHTOOL_SFP_DISPLAY_PORT
+
#endif /* < 2.6.33 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#ifndef pci_num_vf
+#define pci_num_vf(pdev) _kc_pci_num_vf(pdev)
+extern int _kc_pci_num_vf(struct pci_dev *dev);
+#endif
+#endif /* RHEL_RELEASE_CODE */
#ifndef ETH_FLAG_NTUPLE
#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
#endif
do { \
struct adapter_struct *kc_adapter = netdev_priv(netdev);\
struct pci_dev *pdev = kc_adapter->pdev; \
- printk("%s %s: " format, level, pci_name(pdev), \
- ##args); \
+ printk(level "%s: " format, pci_name(pdev), ##args); \
} while(0)
#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
#define netdev_printk(level, netdev, format, args...) \
#define netdev_info(dev, format, args...) \
netdev_printk(KERN_INFO, dev, format, ##args)
#undef netdev_dbg
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args)
+#elif defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...) \
do { \
dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
#ifdef HAVE_TX_MQ
#include <net/sch_generic.h>
#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
#define netif_set_real_num_tx_queues _kc_netif_set_real_num_tx_queues
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
#else /* CONFIG_NETDEVICES_MULTI_QUEUE */
#define netif_set_real_num_tx_queues(_netdev, _count) \
do { \
(_netdev)->egress_subqueue_count = _count; \
} while (0)
#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */
-#else
+#else /* HAVE_TX_MQ */
#define netif_set_real_num_tx_queues(_netdev, _count) do {} while(0)
#endif /* HAVE_TX_MQ */
#ifndef ETH_FLAG_RXHASH
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
+#ifndef netif_set_real_num_rx_queues
+/* No-op compat stub: the request is accepted and ignored, always
+ * returning 0 (success), since these kernels track no per-device
+ * real_num_rx_queues value. */
+static inline int __kc_netif_set_real_num_rx_queues(struct net_device *dev,
+						    unsigned int rxq)
+{
+	return 0;
+}
+#define netif_set_real_num_rx_queues(dev, rxq) \
+	__kc_netif_set_real_num_rx_queues((dev), (rxq))
+#endif
#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR
#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2)
#endif
}
#define vlan_get_protocol(_skb) __kc_vlan_get_protocol(_skb)
#endif
+#ifdef HAVE_HW_TIME_STAMP
+#define SKBTX_HW_TSTAMP (1 << 0)
+#define SKBTX_IN_PROGRESS (1 << 2)
+#define SKB_SHARED_TX_IS_UNION
+#endif
#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) )
#ifndef HAVE_VLAN_RX_REGISTER
#define HAVE_VLAN_RX_REGISTER
#else /* < 2.6.40 */
#define HAVE_ETHTOOL_SET_PHYS_ID
#endif /* < 2.6.40 */
+/*****************************************************************************/
+
+/*****************************************************************************/
+#undef CONFIG_IGB_PTP
+#ifdef CONFIG_PTP
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) && (defined(CONFIG_PTP_1588_CLOCK) || defined(CONFIG_PTP_1588_CLOCK_MODULE))
+#define CONFIG_IGB_PTP
+#else
+#error Cannot enable PTP Hardware Clock due to insufficient kernel support
+#endif
+#endif /* IGB_PTP */
+
+/*****************************************************************************/
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
put_page(skb_frag_page(frag));
}
#endif /* __skb_frag_unref */
+#ifndef SPEED_UNKNOWN
+#define SPEED_UNKNOWN -1
+#endif
+#ifndef DUPLEX_UNKNOWN
+#define DUPLEX_UNKNOWN 0xff
+#endif
#else /* < 3.2.0 */
#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
#define HAVE_PCI_DEV_FLAGS_ASSIGNED
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
+#define skb_tx_timestamp(skb) do {} while (0)
#else
#define HAVE_FDB_OPS
+#define HAVE_ETHTOOL_GET_TS_INFO
#endif /* < 3.5.0 */
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
+#ifdef ETHTOOL_GEEE
+#include <linux/mdio.h>
+/**
+ * mmd_eee_cap_to_ethtool_sup_t
+ * @eee_cap: value of the MMD EEE Capability register
+ *
+ * A small helper function that translates MMD EEE Capability (3.20) bits
+ * to ethtool supported settings.
+ *
+ * Return: mask of ethtool SUPPORTED_* bits corresponding to @eee_cap.
+ */
+static inline u32 mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
+{
+	u32 supported = 0;
+
+	if (eee_cap & MDIO_EEE_100TX)
+		supported |= SUPPORTED_100baseT_Full;
+	if (eee_cap & MDIO_EEE_1000T)
+		supported |= SUPPORTED_1000baseT_Full;
+	if (eee_cap & MDIO_EEE_10GT)
+		supported |= SUPPORTED_10000baseT_Full;
+	if (eee_cap & MDIO_EEE_1000KX)
+		supported |= SUPPORTED_1000baseKX_Full;
+	if (eee_cap & MDIO_EEE_10GKX4)
+		supported |= SUPPORTED_10000baseKX4_Full;
+	if (eee_cap & MDIO_EEE_10GKR)
+		supported |= SUPPORTED_10000baseKR_Full;
+
+	return supported;
+}
+
+/**
+ * mmd_eee_adv_to_ethtool_adv_t
+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
+ *
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
+ * settings.
+ *
+ * Return: mask of ethtool ADVERTISED_* bits corresponding to @eee_adv.
+ */
+static inline u32 mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
+{
+	u32 adv = 0;
+
+	if (eee_adv & MDIO_EEE_100TX)
+		adv |= ADVERTISED_100baseT_Full;
+	if (eee_adv & MDIO_EEE_1000T)
+		adv |= ADVERTISED_1000baseT_Full;
+	if (eee_adv & MDIO_EEE_10GT)
+		adv |= ADVERTISED_10000baseT_Full;
+	if (eee_adv & MDIO_EEE_1000KX)
+		adv |= ADVERTISED_1000baseKX_Full;
+	if (eee_adv & MDIO_EEE_10GKX4)
+		adv |= ADVERTISED_10000baseKX4_Full;
+	if (eee_adv & MDIO_EEE_10GKR)
+		adv |= ADVERTISED_10000baseKR_Full;
+
+	return adv;
+}
+#endif /* ETHTOOL_GEEE */
+#endif /* < 3.7.0 */
#endif /* _KCOMPAT_H_ */